code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , a_ , a_ , a_ = True , a_ = False ) -> Tuple:
lowercase : Any = scheduler
lowercase : Dict = optimizers if isinstance(a_ , (list, tuple) ) else [optimizers]
lowercase : Any = split_batches
lowercase : List[Any] = step_with_optimizer
lowercase : Optional[int] = GradientState()
def a__ ( self , *a_ , **a_ ) -> Tuple:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*a_ , **a_ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*a_ , **a_ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowercase : Optional[Any] = AcceleratorState().num_processes
for _ in range(a_ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*a_ , **a_ )
else:
self.scheduler.step(*a_ , **a_ )
def a__ ( self ) -> int:
return self.scheduler.get_last_lr()
def a__ ( self ) -> str:
return self.scheduler.state_dict()
def a__ ( self , a_ ) -> Optional[Any]:
self.scheduler.load_state_dict(a_ )
def a__ ( self ) -> List[str]:
return self.scheduler.get_lr()
def a__ ( self , *a_ , **a_ ) -> Dict:
return self.scheduler.print_lr(*a_ , **a_ )
| 372 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , )
def a__ ( self , a_ = "auto" ) -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def a__ ( self ) -> Dict:
self.enable_attention_slicing(a_ )
@torch.no_grad()
def __call__( self , a_ , a_ = 5_1_2 , a_ = 5_1_2 , a_ = 5_0 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , a_ = None , **a_ , ) -> Union[str, Any]:
if isinstance(a_ , a_ ):
lowercase : Tuple = 1
elif isinstance(a_ , a_ ):
lowercase : int = len(a_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(a_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(a_ )}.''' )
# get prompt text embeddings
lowercase : int = self.tokenizer(
a_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowercase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase : List[Any] = text_embeddings.shape
lowercase : Tuple = text_embeddings.repeat(1 , a_ , 1 )
lowercase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase : List[str]
if negative_prompt is None:
lowercase : Optional[Any] = [""]
elif type(a_ ) is not type(a_ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !='''
F''' {type(a_ )}.''' )
elif isinstance(a_ , a_ ):
lowercase : Tuple = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
lowercase : Dict = negative_prompt
lowercase : Tuple = text_input_ids.shape[-1]
lowercase : Any = self.tokenizer(
a_ , padding="max_length" , max_length=a_ , truncation=a_ , return_tensors="pt" , )
lowercase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase : Union[str, Any] = uncond_embeddings.shape[1]
lowercase : Tuple = uncond_embeddings.repeat(a_ , a_ , 1 )
lowercase : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
lowercase : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase : List[str] = torch.randn(
a_ , generator=a_ , device="cpu" , dtype=a_ ).to(self.device )
lowercase : Any = torch.randn(a_ , generator=a_ , device="cpu" , dtype=a_ ).to(
self.device )
else:
lowercase : List[Any] = torch.randn(
a_ , generator=a_ , device=self.device , dtype=a_ )
lowercase : Optional[Any] = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase : Dict = latents_reference.to(self.device )
lowercase : int = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase : Optional[int] = 0 if dx < 0 else dx
lowercase : Tuple = 0 if dy < 0 else dy
lowercase : Optional[int] = max(-dx , 0 )
lowercase : int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowercase : Union[str, Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(a_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase : List[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : Optional[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : List[str] = {}
if accepts_eta:
lowercase : List[str] = eta
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : int = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
lowercase : Dict = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase : Dict = noise_pred.chunk(2 )
lowercase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase : int = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a_ , a_ , a_ )
lowercase : List[Any] = 1 / 0.1_82_15 * latents
lowercase : Tuple = self.vae.decode(a_ ).sample
lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase : Tuple = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors="pt" ).to(
self.device )
lowercase , lowercase : Tuple = self.safety_checker(
images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase : Union[str, Any] = None
if output_type == "pil":
lowercase : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
| 372 | 1 |
from typing import Any
import numpy as np
def A_ ( snake_case : np.ndarray ) -> bool:
'''simple docstring'''
return np.array_equal(snake_case , matrix.conjugate().T )
def A_ ( snake_case : np.ndarray , snake_case : np.ndarray ) -> Any:
'''simple docstring'''
__UpperCamelCase = v.conjugate().T
__UpperCamelCase = v_star.dot(snake_case )
assert isinstance(snake_case , np.ndarray )
return (v_star_dot.dot(snake_case )) / (v_star.dot(snake_case ))
def A_ ( ) -> None:
'''simple docstring'''
__UpperCamelCase = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__UpperCamelCase = np.array([[1], [2], [3]] )
assert is_hermitian(snake_case ), f"{a} is not hermitian."
print(rayleigh_quotient(snake_case , snake_case ) )
__UpperCamelCase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(snake_case ), f"{a} is not hermitian."
assert rayleigh_quotient(snake_case , snake_case ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 714 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( snake_case : List[str] ) -> List[str]:
'''simple docstring'''
print('''Loading config file...''' )
def flatten_yaml_as_dict(snake_case : Optional[int] , snake_case : List[Any]="" , snake_case : str="." ):
__UpperCamelCase = []
for k, v in d.items():
__UpperCamelCase = parent_key + sep + k if parent_key else k
if isinstance(snake_case , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case , snake_case , sep=snake_case ).items() )
else:
items.append((new_key, v) )
return dict(snake_case )
__UpperCamelCase = argparse.Namespace()
with open(snake_case , '''r''' ) as yaml_file:
try:
__UpperCamelCase = yaml.load(snake_case , Loader=yaml.FullLoader )
__UpperCamelCase = flatten_yaml_as_dict(snake_case )
for k, v in flat_cfg.items():
setattr(snake_case , snake_case , snake_case )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(snake_case , str(snake_case ) ) )
return config
def A_ ( snake_case : List[Any] , snake_case : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = MobileViTVaConfig()
__UpperCamelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
__UpperCamelCase = 1000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
__UpperCamelCase = 384
else:
__UpperCamelCase = 256
__UpperCamelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
__UpperCamelCase = 21000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
__UpperCamelCase = 384
else:
__UpperCamelCase = 256
__UpperCamelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
__UpperCamelCase = 151
__UpperCamelCase = 512
__UpperCamelCase = '''ade20k-id2label.json'''
__UpperCamelCase = True
elif task_name.startswith('''voc_''' ):
__UpperCamelCase = 21
__UpperCamelCase = 512
__UpperCamelCase = '''pascal-voc-id2label.json'''
__UpperCamelCase = True
# orig_config
__UpperCamelCase = load_orig_config_file(snake_case )
assert getattr(snake_case , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
__UpperCamelCase = getattr(snake_case , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(snake_case , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__UpperCamelCase = getattr(snake_case , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__UpperCamelCase = getattr(snake_case , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
__UpperCamelCase = getattr(snake_case , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
__UpperCamelCase = getattr(snake_case , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
__UpperCamelCase = getattr(snake_case , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
__UpperCamelCase = '''huggingface/label-files'''
__UpperCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) )
__UpperCamelCase = {int(snake_case ): v for k, v in idalabel.items()}
__UpperCamelCase = idalabel
__UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def A_ ( snake_case : List[Any] , snake_case : int , snake_case : Any ) -> str:
'''simple docstring'''
__UpperCamelCase = dct.pop(snake_case )
__UpperCamelCase = val
def A_ ( snake_case : int , snake_case : List[Any]=False ) -> Optional[Any]:
'''simple docstring'''
if base_model:
__UpperCamelCase = ''''''
else:
__UpperCamelCase = '''mobilevitv2.'''
__UpperCamelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__UpperCamelCase = k[8:]
else:
__UpperCamelCase = k
if ".block." in k:
__UpperCamelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
__UpperCamelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
__UpperCamelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
__UpperCamelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
__UpperCamelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
__UpperCamelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
__UpperCamelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
__UpperCamelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
__UpperCamelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
__UpperCamelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
__UpperCamelCase = [0, 1]
elif i == 4:
__UpperCamelCase = [0, 1, 2, 3]
elif i == 5:
__UpperCamelCase = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
__UpperCamelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
__UpperCamelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
__UpperCamelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
__UpperCamelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
__UpperCamelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
__UpperCamelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
__UpperCamelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
__UpperCamelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
__UpperCamelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
__UpperCamelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
__UpperCamelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
__UpperCamelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def A_ ( snake_case : List[str] ) -> str:
'''simple docstring'''
__UpperCamelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(snake_case )
for k in keys_to_ignore:
state_dict.pop(snake_case , snake_case )
def A_ ( ) -> str:
'''simple docstring'''
__UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
__UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def A_ ( snake_case : Dict , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Optional[int] ) -> int:
'''simple docstring'''
__UpperCamelCase = get_mobilevitva_config(snake_case , snake_case )
# load original state_dict
__UpperCamelCase = torch.load(snake_case , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
__UpperCamelCase = MobileViTVaForSemanticSegmentation(snake_case ).eval()
__UpperCamelCase = False
else:
__UpperCamelCase = MobileViTVaForImageClassification(snake_case ).eval()
__UpperCamelCase = False
# remove and rename some keys of load the original model
__UpperCamelCase = checkpoint
remove_unused_keys(snake_case )
__UpperCamelCase = create_rename_keys(snake_case , base_model=snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
# load modified state_dict
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__UpperCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCamelCase = model(**snake_case )
# verify classification model
if task_name.startswith('''imagenet''' ):
__UpperCamelCase = outputs.logits
__UpperCamelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__UpperCamelCase = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , snake_case , atol=1e-4 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
lowercase__ : Tuple = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 451 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 62 | """simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_UpperCamelCase : int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 599 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PerceiverFeatureExtractor"]
A_ = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 384 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] ) -> bool:
__SCREAMING_SNAKE_CASE : int = len(__SCREAMING_SNAKE_CASE )
# We need to create solution object to save path.
__SCREAMING_SNAKE_CASE : Any = [[0 for _ in range(__SCREAMING_SNAKE_CASE )] for _ in range(__SCREAMING_SNAKE_CASE )]
__SCREAMING_SNAKE_CASE : List[Any] = run_maze(__SCREAMING_SNAKE_CASE , 0 , 0 , __SCREAMING_SNAKE_CASE )
if solved:
print('''\n'''.join(str(__SCREAMING_SNAKE_CASE ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[int]] ) -> bool:
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
# Final check point.
if i == j == (size - 1):
__SCREAMING_SNAKE_CASE : List[Any] = 1
return True
__SCREAMING_SNAKE_CASE : List[Any] = (not i < 0) and (not j < 0) # Check lower bounds
__SCREAMING_SNAKE_CASE : Tuple = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__SCREAMING_SNAKE_CASE : Any = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
# check for directions
if (
run_maze(__SCREAMING_SNAKE_CASE , i + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
or run_maze(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , j + 1 , __SCREAMING_SNAKE_CASE )
or run_maze(__SCREAMING_SNAKE_CASE , i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
or run_maze(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , j - 1 , __SCREAMING_SNAKE_CASE )
):
return True
__SCREAMING_SNAKE_CASE : int = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_A = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def A_ ( __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
for pegasus_name, hf_name in PATTERNS:
__SCREAMING_SNAKE_CASE : List[str] = k.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a ``PegasusForConditionalGeneration`` and load *tf_weights* into it.

    *tf_weights* maps TF variable names to numpy arrays; *cfg_updates* overrides
    the ``DEFAULTS`` config kwargs.  Raises ``ValueError`` if a renamed TF key
    has no counterpart in the torch state dict.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            # TF stores linear kernels transposed relative to torch Linear weights.
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # Biases absent from the TF checkpoint are zero-initialised.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    """Read every variable from a TF checkpoint into ``{name: numpy array}``.

    Optimizer slots (Adafactor state, global_step) are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert one Pegasus TF checkpoint to a PyTorch model + tokenizer in *save_dir*.

    The dataset name is taken from the checkpoint's parent directory and used to
    look up per-task config overrides in ``task_specific_params``.
    """
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        # The "large" checkpoint carries the full per-task table in its config.
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are static (sinusoidal) and re-created at load time.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    # CLI entry point: convert a TF Pegasus checkpoint to PyTorch.
    # The mangled original assigned the parser/args to `_A` but then used the
    # undefined names `parser`/`args`/`dataset`, and never wrote the derived
    # save_dir back — restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 158 | 1 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs and inputs for the standalone decoder tests.

    NOTE(review): restored from a name-mangled original whose ``__init__`` had
    duplicate parameter names (a SyntaxError) and stripped ``self.`` prefixes;
    defaults and assignment order are preserved from the original lines.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, attention_mask, lm_labels)`` for one case."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches full recomputation."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        # avoid pad tokens so caching behaviour is unaffected by padding
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the standalone TrOCR decoder.

    NOTE(review): bases and ``test_*`` method names restored from a name-mangled
    original (mangled names ``__magic_name__``/``_a`` are undefined/undiscoverable
    by unittest); method order is preserved from the original lines.
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 690 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    """Builds small TF MBart configs and inputs for the common tests.

    NOTE(review): restored from a name-mangled original (duplicate parameter
    names in ``__init__``, stripped ``self.`` prefixes); defaults and
    assignment order are preserved from the original lines.
    """

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)``; every sequence ends with EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a TF MBart forward pass.

    Missing attention masks are derived from pad tokens (the first decoder
    position is always attended); missing head masks default to all-ones.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF MBart (bases/names restored from the mangled original)."""

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """Slow integration test: en->ro batch translation with facebook/mbart-large-en-ro."""

    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 690 | 1 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that *device_map* assigns blocks ``0..num_blocks-1`` exactly once.

    *device_map* maps a device to a list of attention-block indices.  Raises
    ``ValueError`` when a block is assigned twice, is missing, or does not
    exist for this model.  (The mangled original's signature had duplicate
    parameter names — a SyntaxError — restored here.)
    """
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Return a dict distributing layers ``0..n_layers-1`` evenly across *devices*.

    Each device receives a contiguous chunk of ``ceil(n_layers / len(devices))``
    layer indices; the last device may get fewer.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
| 54 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.  In the mangled original both the logger and the archive map
# were assigned to the same name `_lowercase`, so the logger was immediately
# clobbered — give each its conventional name.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration for XLM models.

    NOTE(review): restored from a name-mangled original (base class and
    parameter names were lost); defaults and assignment order are preserved
    from the original lines.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM (restored from the mangled original)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for each ONNX input, depending on the task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 210 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Known dance-diffusion checkpoints: download URL plus the audio sample rate
# and sample size each was trained with.  The mangled original bound this to
# `SCREAMING_SNAKE_CASE_`, but the functions below read the name `MODELS_MAP`.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Map (alpha, sigma) noise-schedule tensors to a timestep in [0, 1].

    The mangled original called the nonexistent ``torch.atana`` with the same
    argument twice; restored to ``torch.atan2(sigma, alpha)`` scaled by 2/pi.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Convert a linear timestep *t* to the 'crash' noise schedule's timestep.

    sigma follows sin^2(t*pi/2); alpha is chosen so alpha^2 + sigma^... stays on
    the schedule's trajectory, then mapped back through `alpha_sigma_to_t`.
    """
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """Plain attribute bag used below to mimic the training script's args object."""

    pass
class DiffusionUncond(nn.Module):
    """Wrapper matching the original training checkpoint's module layout.

    Holds the diffusion U-Net, its EMA copy, and the Sobol RNG so that
    ``load_state_dict`` on the original checkpoint resolves correctly.
    """

    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch the checkpoint for *model_name* via wget and return its local path."""
    url = MODELS_MAP[model_name]["url"]
    # NOTE(review): shells out to wget; assumes it is installed and the cwd is writable.
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
# Sub-layer index -> diffusers layer name, per block position.  The mangled
# original bound every dict to `SCREAMING_SNAKE_CASE_` (each clobbering the
# last) while `rename`/`convert_*_naming` read these names — restore them.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# ResConvBlock sub-module renames.
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention sub-module renames; the fused qkv projection splits into three.
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Rename a ResConvBlock sub-module name via ``RES_CONV_MAP``.

    Raises ``ValueError`` when *name* is neither a skip connection nor of the
    form ``main.<digit>``.
    """
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Rename an attention sub-module name via ``ATTN_MAP``.

    Returns a string for 1:1 renames, or a list of names when a fused module
    (e.g. ``qkv_proj``) splits into several targets.  Raises ``ValueError`` when
    no prefix matches.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Translate one original checkpoint key into its diffusers UNet key.

    Strips the ``net.`` / ``main.7.`` nesting while counting *depth*, then maps
    the leading layer number through the DOWN/UP/MID/DEPTH_0 tables and converts
    the remaining sub-module name.  Returns a string, or a list of strings when
    a fused attention projection splits into several target keys.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        # up blocks are indexed in reverse of the descent depth
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    new_string_left = string_left
    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename every key of the original state dict into the diffusers layout."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Store Conv1d attention weights *v* under the Linear key(s) in *new_k*.

    A single target key gets the kernel squeezed to 2-D (``v[:, :, 0]``); three
    target keys mean a fused qkv projection, which is split into equal thirds
    along dim 0.  Returns the updated *new_state_dict*.
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def lowercase(args):
    """Convert an original dance-diffusion checkpoint into a diffusers pipeline.

    Loads the original checkpoint, renames/reshapes its weights into the diffusers
    UNet, then runs both the original sampler and the converted pipeline from the
    same seed and asserts the generated audio matches before (optionally) saving.

    Args:
        args: parsed CLI namespace with `model_path`, `save` and `checkpoint_path`.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    # The renamed keys must exactly cover the diffusers keys, except the
    # non-trainable "kernel" entries that were intentionally dropped.
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    # Re-seed so pipeline and original sampler start from the same noise.
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    # Parse CLI arguments and run the conversion entry point.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        # NOTE(review): `type=bool` is misleading on a CLI flag — any non-empty
        # string (including "False") parses as True. Kept for interface
        # compatibility; consider `action="store_true"` instead.
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    main(args)
| 573 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase_(TestCase):
    """Static checks over the dataset scripts under ./datasets.

    Ensures every `open(...)` call passes an explicit encoding/mode keyword and
    that no stray `print(...)` statements are left in the scripts.
    """

    def _no_encoding_on_file_open(self, filepath):
        """Return a regex match if `filepath` contains an `open(...)` call without an encoding/mode keyword, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        """Return the first real `print(` occurrence in `filepath` (ignoring comments and docstrings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 573 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds a tiny ESM config plus random inputs and runs shape checks for the TF ESM tests."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create random ids/mask/labels and a small EsmConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Same as `prepare_config_and_inputs` but in decoder mode with encoder tensors."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model with dict, list and bare-tensor inputs and check the output shape."""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the model as a cross-attending decoder and check the output shape."""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head's logits shape."""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test suite: config plus an inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF ESM models."""

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests checking real ESM checkpoints against reference values."""

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 629 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds a tiny MegatronBERT config plus random inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice along a new axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test suite: config plus an inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the MegatronBERT models."""

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common preparation with pretraining labels when requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Wrap a nested token-id list in a long tensor on the shared test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
# Relative/absolute tolerance for comparing model outputs with reference values.
TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """Slow integration checks against the released MegatronBERT checkpoint."""

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 629 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Lazily-importable submodule structure; extended below when optional backends exist.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports only for type checkers; runtime uses the lazy module below.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 156 |
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """Return the minimal sum of a top-left to bottom-right path (moving only right/down).

    The input grid is modified in place with cumulative sums.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7

    Raises:
        TypeError: if the grid or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # The first row can only be reached by moving right: plain prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Every following row depends only on the row directly above it.
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Add to each cell the cheaper of its left and upper neighbours (in place).

    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 156 | 1 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using a rotate-and-recurse strategy.

    >>> sorted(permute([1, 2]))
    [[1, 2], [2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        # Remove the head, permute the rest, then re-attach the removed element
        # to every sub-permutation and rotate it to the back for the next round.
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permutea(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using in-place swap backtracking.

    >>> sorted(permutea([1, 2]))
    [[1, 2], [2, 1]]
    """

    def backtrack(start):
        # Once the prefix reaches the last index, the whole list is one permutation.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
| 408 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
# Deprecation shim: importing this module only re-exports the pipeline and warns.
warnings.warn(
    'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionInpaintPipeline` instead.'
)
| 408 | 1 |
class UpperCamelCase__:
    """Disjoint-set (union-find) over sets with element counts.

    Tracks, per root, the size of its set (`set_counts`) and the size of the
    largest set seen so far (`max_set`). Uses union by rank and path compression.
    """

    def __init__(self, set_counts: list) -> None:
        """
        Args:
            set_counts: initial size of each singleton set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing `src` and `dst`.

        Returns:
            False if they were already in the same set, True otherwise.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach src's tree under dst's root; dst inherits the whole count.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # Attach dst's tree under src's root.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 713 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( BaseImageProcessor ):
    """LeViT-style image processor: optional shortest-edge resize (the edge is
    scaled by 256/224 before resizing), center crop, rescale and normalization.

    NOTE(review): the original block was machine-mangled — every parameter in a
    signature shared one obfuscated name (a SyntaxError), ``self.*`` assignments
    were collapsed into a single local, and the base class / class attribute
    names were destroyed.  This reconstruction follows the body's reads; the
    base class and ``model_input_names`` are taken from the module's imports
    and the ``"pixel_values"`` payload below — confirm against the upstream
    implementation.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # default_to_square=False keeps a bare int meaning "shortest edge".
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize *image*; a ``shortest_edge`` size is first scaled by 256/224."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop *image* to ``size["height"]`` x ``size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline over one image or a batch."""
        # Per-call overrides fall back to the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 650 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (number_of_bytes : int , partitions : int ):
    """Split *number_of_bytes* into *partitions* contiguous 1-based byte ranges.

    The last partition absorbs any remainder so the ranges exactly cover
    ``1..number_of_bytes``.

    NOTE(review): the original signature declared the same obfuscated name for
    both parameters (a SyntaxError) and collapsed every local; the body's reads
    make the intended names unambiguous.

    :raises ValueError: if *partitions* is not positive or exceeds the byte count
    :return: list of ``"start-end"`` strings, one per partition
    """
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # The final partition runs to the very last byte.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 22 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def snake_case_ (UpperCamelCase : bytes ):
    """Reverse the byte order of a 32-char binary string word-by-word.

    Treats the 32 characters as four 8-char "bytes" and emits them in
    little-endian order (last byte first).

    NOTE(review): the original collapsed the accumulator into a throwaway name
    and read the undefined ``string_aa``; reconstructed from the body's logic.

    :raises ValueError: if the input is not exactly 32 characters long
    """
    if len(UpperCamelCase) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += UpperCamelCase[8 * i : 8 * i + 8]
    return little_endian
def snake_case_ (UpperCamelCase : int ):
    """Format a non-negative 32-bit int as little-endian hex bytes.

    E.g. ``1`` -> ``b'01000000'`` (hex byte pairs emitted last-first).

    NOTE(review): the original collapsed ``hex_rep`` and the accumulator into a
    throwaway name; reconstructed from the body's reads.

    :raises ValueError: if the input is negative
    """
    if UpperCamelCase < 0:
        raise ValueError('Input must be non-negative' )
    # Keep only the low 32 bits, zero-padded to 8 hex digits.
    hex_rep = format(UpperCamelCase, '08x' )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def snake_case_ (UpperCamelCase : bytes ):
    """MD5 preprocessing: expand the message to a bit string and pad it.

    Each byte becomes 8 '0'/'1' characters; a '1' bit is appended, the string
    is zero-padded to 448 (mod 512) chars, and the original bit length is
    appended as a 64-bit little-endian value — so the result's length is a
    multiple of 512.

    NOTE(review): reconstructed — the original collapsed ``bit_string`` and
    ``start_len`` into one name; the length field is presumably the bit length
    of the message (i.e. ``len(bit_string)``) per RFC 1321 — confirm.
    Relies on the sibling helper ``to_little_endian``.
    """
    bit_string = b""
    for char in UpperCamelCase:
        bit_string += format(char, '08b' ).encode('utf-8' )
    start_len = format(len(bit_string), '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def snake_case_ (UpperCamelCase : bytes ):
    """Yield sixteen 32-bit words per 512-char chunk of a preprocessed bit string.

    Each 32-char slice is decoded through the sibling ``to_little_endian``
    helper and parsed as a base-2 integer.

    NOTE(review): reconstructed — the original collapsed ``block`` and
    ``block_words`` into one name.

    :raises ValueError: if the input length is not a multiple of 512
    """
    if len(UpperCamelCase) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0, len(UpperCamelCase), 512):
        block = UpperCamelCase[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def snake_case_ (UpperCamelCase : int ):
    """Bitwise NOT of a non-negative int within 32 bits.

    NOTE(review): reconstructed — the original collapsed ``i_str``/``new_str``
    into one name; flipping each character of the 32-bit binary rendering is
    equivalent to ``(2**32 - 1) ^ i``.

    :raises ValueError: if the input is negative
    """
    if UpperCamelCase < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(UpperCamelCase, '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def snake_case_ (a : int , b : int ):
    """Add two ints modulo 2**32 (MD5's word addition).

    NOTE(review): the original declared the same obfuscated name for both
    parameters (a SyntaxError); the body reads ``a`` and ``b``.
    """
    return (a + b) % 2**32
def snake_case_ (i : int , shift : int ):
    """Rotate the 32-bit value *i* left by *shift* bits.

    NOTE(review): the original declared the same obfuscated name for both
    parameters (a SyntaxError); the body reads ``i`` and ``shift``.  XOR is
    equivalent to OR here because the two shifted halves occupy disjoint bits
    within the low 32 after the modulo.

    :raises ValueError: if *i* or *shift* is negative
    """
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case_ (UpperCamelCase : bytes ):
    """Return the MD5 digest of *UpperCamelCase* as 32 lowercase hex bytes.

    Standard MD5 (RFC 1321): pad the message, then for every 512-bit chunk run
    the 64 mixing rounds and fold the result into the four running state words.

    NOTE(review): reconstructed — the original collapsed every local (state
    words, ``f``, ``g``, the loop temporaries) into one obfuscated name.  It
    relies on the sibling helpers ``preprocess``, ``get_block_words``,
    ``not_aa``, ``sum_aa``, ``left_rotate_aa`` and ``reformat_hex`` exactly as
    the original call sites do.
    """
    bit_string = preprocess(UpperCamelCase)
    # K[i] = floor(2**32 * |sin(i + 1)|): the per-round additive constants.
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states (A, B, C, D).
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round left-rotation amounts: four rounds of sixteen steps each.
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process bit string in chunks, each with 16 32-char words.
    for block_words in get_block_words(bit_string):
        a, b, c, d = a0, b0, c0, d0

        # Hash current chunk.
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total.
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 22 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Bindings read by the conversion helpers below (`logger` for progress
# messages, `EXPECTED_MISSING_KEYS` when validating the loaded state dict).
# NOTE(review): the original bound these to throwaway obfuscated names.
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def __A ( lowerCAmelCase_ ):
    """Map a fairseq MusicGen decoder weight name to its transformers equivalent.

    NOTE(review): the original discarded the result of every ``replace`` call
    into a throwaway name and returned the undefined ``name``; each rename is
    now threaded through ``name``.
    """
    name = lowerCAmelCase_
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1", "fc1" )
    if "linear2" in name:
        name = name.replace("linear2", "fc2" )
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears", "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj" )
    return name
def __A ( state_dict , hidden_size ):
    """Rename all decoder weights and split fused q/k/v projections.

    Returns the renamed decoder state dict plus a separate dict holding the
    ``enc_to_dec_proj`` weights (loaded into the composite model later).

    NOTE(review): the original declared the same obfuscated name for both
    parameters (a SyntaxError) and collapsed every write into one local; the
    q/k/v target key names are inferred from the fused ``in_proj_weight``
    split — confirm against the upstream conversion script.
    Relies on the sibling helper ``rename_keys``.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def __A ( lowerCAmelCase_ ):
    """Build a MusicgenDecoderConfig for a named checkpoint size.

    NOTE(review): the original collapsed ``hidden_size`` /
    ``num_hidden_layers`` / ``num_attention_heads`` into one obfuscated name;
    reconstructed from the config call's reads.

    :param lowerCAmelCase_: checkpoint size, one of ``small``/``medium``/``large``
    :raises ValueError: for any other checkpoint name
    """
    checkpoint = lowerCAmelCase_
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def __A ( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    """Convert a fairseq MusicGen checkpoint into a transformers model.

    Loads the fairseq LM, renames its decoder weights, assembles the composite
    text-encoder / audio-encoder / decoder model, sanity-checks a forward pass,
    builds the processor, and optionally saves and/or pushes the result.

    NOTE(review): reconstructed — the original declared four parameters with
    one shared obfuscated name (a SyntaxError) and collapsed every local; the
    generation-config values (2048 bos/pad ids, 30 s max length, sampling with
    guidance 3.0) are inferred from the bare obfuscated assignments — confirm
    against the upstream conversion script.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("""t5-base""" )
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long ).reshape(2, -1 )
    decoder_input_ids = input_ids.reshape(2 * 4, -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""", padding_side="""left""" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to throwaway
    # obfuscated names and called the undefined `convert_musicgen_checkpoint`;
    # the conversion entry point above is (obfuscated as) `__A`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )
    args = parser.parse_args()
    __A(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 156 |
'''simple docstring'''
import os
def __A ( lowerCAmelCase_ = "input.txt" ):
    """Project Euler 82: minimal path sum through a matrix, moving up, down
    and right, entering anywhere in the left column and exiting anywhere in
    the right column.

    NOTE(review): reconstructed — the original collapsed ``matrix``, ``rows``,
    ``cols`` and ``minimal_path_sums`` into one obfuscated name, and passed the
    filename itself to ``os.path.dirname``; the conventional
    ``dirname(__file__)`` anchoring is restored.

    :param lowerCAmelCase_: name of the comma-separated matrix file, resolved
        relative to this script's directory
    """
    with open(os.path.join(os.path.dirname(__file__), lowerCAmelCase_)) as input_file:
        matrix = [
            [int(element) for element in line.split(""",""" )]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # Left column: entry cost equals the cell cost.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # First assume entry from the left only...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax with downward moves (from the cell above)...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
        # ...and upward moves (from the cell below).
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # NOTE(review): `solution` was an undefined name; the solver above is
    # (obfuscated as) `__A`.
    print(f"{__A() = }")
| 156 | 1 |
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
    """Return all permutations of *SCREAMING_SNAKE_CASE* as tuples, using
    Heap's iterative algorithm.

    NOTE(review): reconstructed — the original declared ``generate`` with two
    parameters sharing one name (a SyntaxError) and collapsed the swaps,
    counter array and accumulator into throwaway names.
    """
    if len(SCREAMING_SNAKE_CASE) <= 1:
        return [tuple(SCREAMING_SNAKE_CASE)]
    res = []

    def generate(n, arr):
        # c[i] counts how many times position i has initiated a swap.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Even i swaps with the first element, odd i with c[i].
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(SCREAMING_SNAKE_CASE), SCREAMING_SNAKE_CASE)
    return res
if __name__ == "__main__":
    # NOTE(review): the original bound the input and parsed list to throwaway
    # obfuscated names and printed the undefined `heaps(arr)`; the permutation
    # generator above is (obfuscated as) `__magic_name__`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(__magic_name__(arr))
| 66 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
    """Bezier curve defined by a list of 2D control points.

    ``degree`` is ``len(points) - 1``; degree 1 is a straight line.

    NOTE(review): reconstructed — the original collapsed every ``self.*``
    assignment and local into one obfuscated name, and all three methods shared
    one mangled name while internal/external call sites used ``basis_function``,
    ``bezier_curve_function`` and ``plot_curve``; those names are restored.
    """

    def __init__( self , list_of_points ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function( self , t ):
        """Return the Bernstein basis values at parameter *t* (0 <= t <= 1)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5 ) == 1
        return output_values

    def bezier_curve_function( self , t ):
        """Return the (x, y) point on the curve at parameter *t*."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve( self , step_size = 0.01 ):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree), )
        plt.scatter(x, y, color="red", label="Control Points" )
        plt.legend()
        plt.show()
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import scaffolding: expose the submodule symbols without importing
# torch until they are actually accessed (the standard transformers
# `_LazyModule` layout).
# NOTE(review): the original bound the structure dict and the modeling list to
# throwaway obfuscated names and never registered the lazy module; this is the
# canonical reconstruction implied by the `_LazyModule(..., _import_structure,
# ...)` read at the bottom.
_import_structure = {
    """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_ernie"""] = [
        """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ErnieForCausalLM""",
        """ErnieForMaskedLM""",
        """ErnieForMultipleChoice""",
        """ErnieForNextSentencePrediction""",
        """ErnieForPreTraining""",
        """ErnieForQuestionAnswering""",
        """ErnieForSequenceClassification""",
        """ErnieForTokenClassification""",
        """ErnieModel""",
        """ErniePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 208 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and the map of public XLM-RoBERTa checkpoints to their
# hosted config files.  Bindings are obfuscated; nothing in this chunk reads
# them, so they are documented in place only.
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)

lowerCamelCase__ : Union[str, Any] = {
    """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
    """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
    """xlm-roberta-large-finetuned-conll02-dutch""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll02-spanish""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-english""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
    ),
    """xlm-roberta-large-finetuned-conll03-german""": (
        """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
    ),
}
class __magic_name__ (PretrainedConfig ):
    """Configuration for XLM-RoBERTa models (vocab, transformer dimensions,
    dropout, token ids, etc.).

    NOTE(review): reconstructed — the original declared every ``__init__``
    parameter with one shared obfuscated name (a SyntaxError), collapsed all
    ``self.*`` assignments, and mangled the base class and ``model_type``
    attribute; parameter names/order follow the assignment body and defaults.
    """

    model_type = 'xlm-roberta'

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __magic_name__ (OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa.

    NOTE(review): reconstructed — the original bound the axis dict to a
    throwaway obfuscated name and mangled the base class.  The property's
    obfuscated name is preserved as the block's interface; canonically it is
    `inputs` — confirm against upstream before relying on OnnxConfig machinery.
    """

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return the input axes: batch/sequence (plus choice for multiple-choice)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 208 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=9_9 , __lowerCAmelCase=0 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase="last" , __lowerCAmelCase=None , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Dict = parent
__magic_name__ :List[str] = batch_size
__magic_name__ :Optional[Any] = seq_length
__magic_name__ :Union[str, Any] = is_training
__magic_name__ :Dict = use_input_lengths
__magic_name__ :Tuple = use_token_type_ids
__magic_name__ :Dict = use_labels
__magic_name__ :List[str] = gelu_activation
__magic_name__ :Dict = sinusoidal_embeddings
__magic_name__ :List[Any] = causal
__magic_name__ :Dict = asm
__magic_name__ :Union[str, Any] = n_langs
__magic_name__ :List[Any] = vocab_size
__magic_name__ :int = n_special
__magic_name__ :Tuple = hidden_size
__magic_name__ :Optional[int] = num_hidden_layers
__magic_name__ :Optional[Any] = num_attention_heads
__magic_name__ :Any = hidden_dropout_prob
__magic_name__ :List[str] = attention_probs_dropout_prob
__magic_name__ :Any = max_position_embeddings
__magic_name__ :Tuple = type_vocab_size
__magic_name__ :Optional[int] = type_sequence_label_size
__magic_name__ :Tuple = initializer_range
__magic_name__ :Dict = num_labels
__magic_name__ :Any = num_choices
__magic_name__ :Optional[Any] = summary_type
__magic_name__ :Dict = use_proj
__magic_name__ :Tuple = scope
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :int = None
if self.use_input_lengths:
__magic_name__ :List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__magic_name__ :Dict = None
if self.use_token_type_ids:
__magic_name__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__magic_name__ :str = None
__magic_name__ :Union[str, Any] = None
__magic_name__ :int = None
if self.use_labels:
__magic_name__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__magic_name__ :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = FlaubertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :str = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , langs=__lowerCAmelCase )
__magic_name__ :List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :str = FlaubertWithLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :List[str] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :int = model(__lowerCAmelCase )
__magic_name__ :Optional[int] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Tuple = FlaubertForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )
__magic_name__ :int = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
__magic_name__ :Optional[Any] = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
((__magic_name__) , ) :Any = result_with_labels.to_tuple()
__magic_name__ :str = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
((__magic_name__) , ) :List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = FlaubertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :int = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :List[str] = self.num_labels
__magic_name__ :Any = FlaubertForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Dict = self.num_choices
__magic_name__ :Any = FlaubertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ :Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ :List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ :Any = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) :int = config_and_inputs
__magic_name__ :str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the two mixin bases were unreadable in the obfuscated source and have been
    # restored to the standard test mixins — confirm against this file's imports.
    """Common + pipeline test-suite for the Flaubert model family."""

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip QA pipeline tests when a slow tokenizer would be used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy start/end positions for the beam-search QA head when labels are requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, then reload the traced model onto the test device."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published `flaubert_base_cased` checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration for the (deprecated) M-CTC-T speech-recognition model.

    Stores the transformer, convolutional front-end, and CTC-loss hyper-parameters.
    The original obfuscated definition used a single duplicated parameter name,
    which is a SyntaxError; the parameter names below follow the assignment order.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
                f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.'
            )
| 641 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 1_6
lowercase_ = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased") -> Dict:
    """Build GLUE/MRPC train and eval dataloaders tokenized for *model_name*.

    The obfuscated original referenced an undefined ``datasets`` name; the
    ``load_dataset`` result is now bound to it. Function name restored to match
    the call site in ``training_function``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate a sequence-classification model under `accelerate`.

    Supports DeepSpeed-managed optimizer/scheduler via the Dummy* stand-ins and
    tracks the best eval accuracy across epochs. Name restored to match the
    ``training_function(...)`` call in ``main``.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed owns it when its config declares one.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (DummyScheduler when DeepSpeed manages it)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('''glue''', '''mrpc''')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels'''])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['''accuracy''']

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''all_results.json'''), '''w''') as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch training (name restored to match the call below)."""
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=False,
    )
    parser.add_argument(
        '''--output_dir''',
        type=str,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--performance_lower_bound''',
        type=float,
        default=None,
        help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=int,
        default=3,
        help='''Number of train epochs.''',
    )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 713 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for the LXMERT vision-language model.

    The obfuscated original declared every parameter with the same name
    (a SyntaxError); parameter names are restored from the assignment order.
    """

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-modality layer counts, exposed under the common attribute name.
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs)
| 65 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds tiny configs and random inputs for DecisionTransformerModel tests.

    Name restored: it is instantiated as ``DecisionTransformerModelTester(self)``
    in the test class's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a config plus random (states, actions, rewards, returns, timesteps, mask)."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        """Return a DecisionTransformerConfig populated from the tester's hyper-parameters."""
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        """Check output shapes of the model's state/action/return predictions."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the form the common ModelTester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''states''': states,
            '''actions''': actions,
            '''rewards''': rewards,
            '''returns_to_go''': returns_to_go,
            '''timesteps''': timesteps,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common test-suite for DecisionTransformerModel (mixins restored from this file's imports)."""

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        """The forward signature must start with the six decision-transformer inputs."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                '''states''',
                '''actions''',
                '''rewards''',
                '''returns_to_go''',
                '''timesteps''',
                '''attention_mask''',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published hopper-expert checkpoint."""

    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of states, actions and returns
        from a sequence of state, action and return. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        # Reference action predictions recorded from the released checkpoint.
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1E-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 11 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for `ConsistencyModelPipeline` using tiny test checkpoints.

    Attribute names restored: the obfuscated source assigned all four class
    attributes to one name, so only the last survived and the pipeline-test
    mixin could not find `pipeline_class`/`params`/`batch_params`.
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def __A ( self , A=False ) -> Tuple:
'''simple docstring'''
if class_cond:
lowerCamelCase = self.dummy_cond_unet
else:
lowerCamelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def __A ( self , A , A=0 ) -> List[str]:
'''simple docstring'''
if str(A ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(A )
else:
lowerCamelCase = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = ConsistencyModelPipeline(**A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = pipe(**A ).images
assert image.shape == (1, 32, 32, 3)
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_multistep_class_cond(self):
    """Multistep sampling with the class-conditional checkpoint (class label 0).

    Fixes: mangled locals restored; the bare `lowerCamelCase = 0` is the
    class-label assignment the conditional pipeline call consumes; method
    renamed from `__A` so unittest discovers it.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components(class_cond=True)
    pipe = ConsistencyModelPipeline(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs["class_labels"] = 0
    image = pipe(**inputs).images
    assert image.shape == (1, 32, 32, 3)
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_onestep(self):
    """One-step sampling (num_inference_steps=1, no explicit timesteps).

    Fixes: mangled locals restored; the two bare assignments set the
    `num_inference_steps` / `timesteps` call kwargs; method renamed from `__A`
    so unittest discovers it.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    pipe = ConsistencyModelPipeline(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs["num_inference_steps"] = 1
    inputs["timesteps"] = None
    image = pipe(**inputs).images
    assert image.shape == (1, 32, 32, 3)
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_onestep_class_cond(self):
    """One-step sampling with the class-conditional checkpoint (class label 0).

    Fixes: mangled locals restored; method renamed from `__A` so unittest
    discovers it.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components(class_cond=True)
    pipe = ConsistencyModelPipeline(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs["num_inference_steps"] = 1
    inputs["timesteps"] = None
    inputs["class_labels"] = 0
    image = pipe(**inputs).images
    assert image.shape == (1, 32, 32, 3)
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __lowercase(unittest.TestCase):
    """Slow / GPU integration tests for ConsistencyModelPipeline checkpoints.

    Fixes: every method was name-mangled to `__A` (so `tearDown` never ran and
    no test was discovered), helper signatures declared duplicate parameters
    named `A` (SyntaxError), and locals were mangled while later lines read the
    real names. Names are restored from the in-class call sites
    (`super().tearDown()`, `self.get_inputs(...)`, `self.get_fixed_latents(...)`).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.floataa, shape=(1, 3, 64, 64)):
        """Deterministic call kwargs for the slow pipeline tests.

        NOTE(review): `torch.floataa` looks like a mangled `torch.float16` —
        confirm against the file's import header.
        """
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.floataa, shape=(1, 3, 64, 64)):
        """Reproducible latents drawn on `device` with the given dtype/shape."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.floataa)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 457 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
# Module-level logger used throughout the self-training driver below; the body
# references `logger` (e.g. `logger.info(...)`), so the mangled name broke it.
logger = logging.getLogger(__name__)

# Filename of the serialized checkpoint that `finetune` writes per iteration.
MODEL_FILE_NAME = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
    """Model arguments for the self-training driver.

    Fixes: the class name was mangled (colliding with the other dataclasses)
    while the driver instantiates `STModelArguments(...)`; both fields were
    named `a__` (duplicate), restored from `args.model_name_or_path` /
    `args.cache_dir` usages.
    """

    # Required base-model checkpoint.
    model_name_or_path: str = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
    # Optional HF cache directory (None = library default).
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class STDataArguments:
    """Data arguments for the self-training driver.

    Fixes: mangled class name restored from the `STDataArguments(train_file=...,
    infer_file=...)` call site; duplicate `a__` field names restored from the
    `args.train_file` / `args.infer_file` / `args.eval_file` / `args.task_name`
    / `args.label_list` usages below.
    """

    train_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
    infer_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'A csv or a json file containing the validation data.'} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={'help': 'The name of the task to train on.'} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments for the self-training driver.

    Fixes: mangled class name restored from the `STTrainingArguments(output_dir=...)`
    call site; duplicate `a__` field names restored from `args.<field>` usages.
    NOTE(review): the defaults mangled to `default=lowerCamelCase` are
    reconstructed as False (booleans) and None (seed) — confirm upstream.
    """

    output_dir: str = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
    eval_metric: Optional[str] = dataclasses.field(
        default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default='no' , metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=1_00 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Build a pseudo-labeled training file from a round of inference.

    Fixes: the original signature declared six parameters all named
    `snake_case` (SyntaxError); the name and parameter order are restored from
    the call site `create_pseudo_labeled_data(args, infer_input, infer_output,
    eval_result, id2label, next_data_dir)`.

    Args:
        args: flattened namespace of ST*Arguments fields.
        infer_input: dataset of unlabeled examples that were predicted on.
        infer_output: dataset of predictions (`prediction`, `probability`).
        eval_result: validation metric in [0, 1] used for performance filtering.
        id2label: mapping from label ids to label strings.
        next_data_dir: directory where `train_pseudo.<ext>` is written.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        # Keep only predictions the model is sufficiently confident about.
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep the top `eval_result` fraction of rows, most confident first.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training for text classification.

    Each iteration runs stage 1 (training on labeled data at iteration 0,
    pseudo-labeled data afterwards), optionally stage 2 (fine-tuning on the
    original labeled data), then generates fresh pseudo-labels for the next
    iteration. Early stopping tracks the evaluation metric across iterations.

    Fixes: the original signature declared duplicate `snake_case` parameters
    (SyntaxError) and every local was mangled; names are restored from the
    in-body references (`accelerator`, `args`, `data_files[...]`,
    `arguments_dict`, ...). NOTE(review): the original function name collided
    with `create_pseudo_labeled_data` above; `selftrain` is a reconstruction —
    confirm no external caller relies on the old name.

    Args:
        model_name_or_path: base checkpoint to start self-training from.
        train_file: csv/json file with labeled training data.
        infer_file: csv/json file with unlabeled data to pseudo-label.
        output_dir: directory where per-iteration results are written.
        **kwargs: overrides for any ST*Arguments field, plus extra arguments
            forwarded to `finetune`.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    # Flatten every argument dataclass into one namespace, then let explicit
    # kwargs override any of those fields.
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
            'do_eval': True if args.eval_file is not None else False,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        # "pytorch_model.bin" is the checkpoint file written by `finetune`.
        model_bin_file_path = os.path.join(current_data_dir, 'best-checkpoint', "pytorch_model.bin")
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.', model_bin_file_path, iteration, )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', "pytorch_model.bin")
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.', model_bin_file_path, iteration, )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.idalabel
        eval_results_file = os.path.join(current_output_dir, 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir, 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset('csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f'''train_pseudo.{args.data_file_extension}''')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    # A tie still moves the "best" pointer forward, but counts
                    # toward the patience budget.
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
| 180 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase_ ( unittest.TestCase ):
    """Unit tests for the summarization preprocessing helpers.

    Fixes: every method was name-mangled to `A` (they collided and unittest
    never discovered them) and locals were mangled; `setUp` is grounded by the
    `self.block_size` reads in the truncate/pad tests.
    """

    def setUp(self):
        # Target length used by the truncate_or_pad tests below.
        self.block_size = 1_0

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than the block size are right-padded with the pad token."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences of exactly the block size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than the block size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without `@highlight` markers yields no summary lines."""
        raw_story = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields no story lines and no summary lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Story lines get a trailing period; `@highlight` blocks become the summary."""
        raw_story = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['''It was the best of times.''']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 2_3).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 1_0_1
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 180 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = jnp.ones((batch_size, length)) / length
return scores
def test_temperature_dist_warper(self):
    """Sharpening (<1) and smoothing (>1) temperatures reshape a peaked row but
    leave a uniform row unchanged.

    Fixes: mangled locals restored from the in-body references; method renamed
    so unittest discovers it.
    """
    input_ids = None
    length = 20
    scores = self._get_uniform_logits(batch_size=2, length=length)
    # tweak scores to not be uniform anymore
    scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
    scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
    # compute softmax
    probs = jax.nn.softmax(scores, axis=-1)
    temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
    temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
    warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
    warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
    # uniform distribution stays uniform
    self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
    self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
    # sharp peaks get higher, valleys get lower
    self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
    self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
    # smooth peaks get lower, valleys get higher
    self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
    self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def test_top_k_dist_warper(self):
    """Top-k keeps the k largest logits and -inf's the rest; min_tokens_to_keep
    overrides a too-small k.

    Fixes: mangled locals restored (the dangling `ramp_logits[1:, ...] + vocab_size`
    expression is re-attached as the in-place slice assignment it feeds);
    method renamed so unittest discovers it.
    """
    input_ids = None
    vocab_size = 10
    batch_size = 2
    # create ramp distribution
    ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
    ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
    top_k_warp = FlaxTopKLogitsWarper(3)
    scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
    # check that correct tokens are filtered
    self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
    self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
    # check special case
    length = 5
    top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
    ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
    scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
    # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
    self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
def test_top_p_dist_warper(self):
    """Nucleus (top-p) filtering keeps the minimal set whose mass reaches p,
    and min_tokens_to_keep enforces a floor on extreme distributions.

    Fixes: mangled locals restored; `1_00.0` scale re-attached to its target
    row; method renamed so unittest discovers it.
    """
    input_ids = None
    vocab_size = 10
    batch_size = 2
    # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
    dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
    top_p_warp = FlaxTopPLogitsWarper(0.8)
    filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
    # dist should be filtered to keep min num values so that sum is >= top_p
    # exp (-inf) => 0
    expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
    self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1e-3))
    # check edge cases with negative and extreme logits
    ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
        vocab_size // 2
    )
    # make ramp_logits more extreme
    ramp_logits[1] = ramp_logits[1] * 100.0
    # make sure at least 2 tokens are kept
    top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
    filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
    # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
    self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
def test_min_length_dist_processor(self):
    """EOS is suppressed (-inf) before min_length and untouched afterwards.

    Fixes: mangled locals restored; method renamed so unittest discovers it.
    """
    vocab_size = 20
    batch_size = 4
    eos_token_id = 0
    min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
    # check that min length is applied at length 5
    input_ids = ids_tensor((batch_size, 20), vocab_size=20)
    cur_len = 5
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
    self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
    # check that min length is not applied anymore at length 15
    scores = self._get_uniform_logits(batch_size, vocab_size)
    cur_len = 15
    scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
    self.assertFalse(jnp.isinf(scores_before_min_length).any())
def test_forced_bos_token_logits_processor(self):
    """At cur_len == 1 every score except the BOS token is forced to -inf.

    Fixes: mangled locals restored; method renamed so unittest discovers it.
    """
    vocab_size = 20
    batch_size = 4
    bos_token_id = 0
    logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
    # check that all scores are -inf except the bos_token_id score
    input_ids = ids_tensor((batch_size, 1), vocab_size=20)
    cur_len = 1
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
    self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
    # check that bos_token_id is not forced if current length is greater than 1
    cur_len = 3
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertFalse(jnp.isinf(scores).any())
def test_forced_eos_token_logits_processor(self):
    """When cur_len reaches max_length - 1, every score except EOS is -inf.

    Fixes: mangled locals restored; method renamed so unittest discovers it.
    """
    vocab_size = 20
    batch_size = 4
    eos_token_id = 0
    max_length = 5
    logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
    # check that all scores are -inf except the eos_token_id when max_length is reached
    input_ids = ids_tensor((batch_size, 4), vocab_size=20)
    cur_len = 4
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
    self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
    # check that eos_token_id is not forced if max_length is not reached
    cur_len = 3
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertFalse(jnp.isinf(scores).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : int = 10
SCREAMING_SNAKE_CASE_ : List[Any] = 15
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((batch_size, sequence_length) , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = input_ids.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : str = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = 10
# no processor list
SCREAMING_SNAKE_CASE_ : Dict = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : str = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
# with processor list
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_ : int = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = 10
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, sequence_length) , lowercase_)
SCREAMING_SNAKE_CASE_ : str = input_ids.copy()
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : int = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_ : str = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase_ , eos_token_id=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = 10
# no processor list
def run_no_processor_list(lowercase_ : str , lowercase_ : List[str] , lowercase_ : int):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp_dist_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = top_k_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = top_p_warp(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = bos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = eos_dist_proc(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
# with processor list
def run_processor_list(lowercase_ : int , lowercase_ : Any , lowercase_ : Dict):
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(lowercase_ , lowercase_ , cur_len=lowercase_)
return scores
SCREAMING_SNAKE_CASE_ : int = jax.jit(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = jax.jit(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = jitted_run_no_processor_list(lowercase_ , lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = jitted_run_processor_list(lowercase_ , lowercase_ , lowercase_)
# scores should be equal
self.assertTrue(jnp.allclose(lowercase_ , lowercase_ , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 512 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which generator to load ("bart" -> fine-tuned BART checkpoint, anything else -> T5 fallback)
# and whether to load the dense retriever + FAISS index. Read by load_models()/load_indexes().
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the retriever (RetriBERT) and generator (BART or T5) models onto GPU.

    Returns a tuple ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``;
    the retriever pair is ``(None, None)`` when LOAD_DENSE_INDEX is disabled.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""")
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""").to("""cuda:0""")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""").to("""cuda:0""")
        # Restore the fine-tuned weights on top of the hub checkpoint.
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""")
        sas_model.load_state_dict(save_dict["""model"""])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""", from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""", device="""cuda:0""")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the Wiki40b passage dataset, its dense FAISS index (on GPU) and an ES client.

    Returns ``(wiki40b_passages, wiki40b_gpu_index_flat, es_client)``; the first two
    are ``(None, None)`` when LOAD_DENSE_INDEX is disabled.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""", name="""wiki40b_en_100_0""")["""train"""]
        # Pre-computed passage embeddings, memory-mapped to avoid loading everything in RAM.
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""", dtype="""float32""", mode="""r""", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings."""
    eli5 = datasets.load_dataset("""eli5""", name="""LFQA_reddit""")
    eli5_train = eli5["""train_eli5"""]
    # Pre-computed question embeddings, memory-mapped from disk.
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""", dtype="""float32""", mode="""r""", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Load everything once at app start; the names below are read by the helpers and the
# script body (make_support, find_nearest_training, answer_question, ...).
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples whose questions are closest to `question`."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    # FAISS returns (scores, indices); we only need the indices of the nearest neighbors.
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question`.

    Returns ``(question_doc, support_list)`` where `question_doc` is the
    "question: ... context: ..." string fed to the generator and `support_list`
    holds ``(article_title, section_title, score, passage_text)`` tuples.
    """
    if source == "none":
        # No retrieval: feed an empty context made of 10 empty <P> slots.
        support_doc, hit_lst = (""" <P> """.join(["""""" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="""english_wiki40b_snippets_100w""", n_results=n_results, )
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),  # tensors are not hashable; skip them for caching
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate a long-form answer for `question_doc` with the seq2seq model.

    NOTE(review): the returned `support_list` is read from module scope (set by the
    script body before this is called), mirroring the original app's behavior.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="""cuda:0""", )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults when the options panel is collapsed: show everything, full passages.
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults; overridden by the sliders below when the panel is open.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, and rebuild the context doc.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Keep the top answer plus any answer scoring above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor.

    >>> real_power(100, 0.9)
    90.0

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor.

    >>> round(reactive_power(100, 0.9), 3)
    43.589

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build `value` out of `denominations`, largest coin first.

    `denominations` is assumed sorted in ascending order; `value` is the target
    amount as a string (as read from user input). Returns the list of coins used.
    """
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination, largest first
    for denomination in reversed(denominations):
        # Use this denomination as many times as it fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        count = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, count):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for coin in answer:
            print(coin, end=" ")
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
# Matches any whitespace; stripped from file contents before hashing (see get_hash).
PATTERN = re.compile(r"""\s+""")
def get_hash(example):
    """Return the MD5 hash of the example's content with all whitespace removed."""
    return {"hash": hashlib.md5(re.sub(PATTERN, """""", example["""content"""]).encode("""utf-8""")).hexdigest()}
def line_stats(example):
    """Return mean and max line length of the example's content."""
    line_lengths = [len(line) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Return the fraction of alphanumeric characters in the example's content."""
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Return True the first time a hash is seen; remove it from `uniques` so duplicates fail."""
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check whether the first `scan_width` lines mention auto-generation."""
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Heuristically detect configuration or test files."""
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keyword in the first `scan_width` lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: density of "config"/"test" mentions relative to file length
    nlines = example["""content"""].count("""\n""")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("""config""")
        count_test += line.lower().count("""test""")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check whether the file lacks basic Python constructs (def/class/for/while)."""
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check whether the file uses the '=' symbol no more than `minimum` times."""
    lines = example["""content"""].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("""=""")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the content with the module-level tokenizer."""
    input_ids = tokenizer(example["""content"""], truncation=False)["""input_ids"""]
    ratio = len(example["""content"""]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):  # noqa: A001 - name required by ds.filter call below; shadows builtin
    """Filter dataset with heuristics; return False for any example to drop."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Gzip-compress `file_path` to `file_path`.gz and remove the original."""
    with open(file_path, """rb""") as f_in:
        with gzip.open(str(file_path) + """.gz""", """wb""", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')

# Deduplicate hashes
uniques = set(ds.unique("""hash"""))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / """data"""
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the LayoutLMv3 sub-package: maps submodule name to the
# public names it provides. Optional backends extend it below; at import time the
# module is replaced with a _LazyModule that resolves names on first access.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert an original (GluonNLP/MXNet) Bort checkpoint into a 🤗 BERT checkpoint.

    Fix: the obfuscated version assigned every local to the same name (`a_`)
    while later statements read the intended names (`predefined_args`,
    `params`, ...), so the function raised NameError immediately. The intended
    local names are restored from the read sites.

    Args:
        bort_checkpoint_path: Path to the official Bort ``.params`` file.
        pytorch_dump_folder_path: Folder where the converted PyTorch model is written.

    Side effects: downloads the RoBERTa tokenizer and the Gluon vocab, writes the
    converted model to ``pytorch_dump_folder_path`` and prints whether the PyTorch
    output matches the MXNet output on a sample sentence.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 architecture.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    # Sample sentence is inlined so this function does not depend on a module constant.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"

    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


# Backward-compatible alias for the previous (obfuscated) name.
_UpperCAmelCase = convert_bort_checkpoint_to_pytorch
if __name__ == "__main__":
    # Fix: `parser` and `args` were previously bound to an unrelated name,
    # so every reference below raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): expects `convert_bort_checkpoint_to_pytorch` to be defined above.
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 666 |
"""Deprecated shim: the contents of `memory_utils` moved to `utils.memory`."""
import warnings

# Emit a FutureWarning on import pointing users at the new location.
# Fix: the message previously referred to `find_executable_batchsize`,
# a name that does not exist; the public API is `find_executable_batch_size`.
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 666 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCamelCase : Optional[Any] = TypeVar("""KEY""")
__UpperCamelCase : Tuple = TypeVar("""VAL""")
@dataclass(frozen=a_ , slots=a_ )
class __SCREAMING_SNAKE_CASE( Generic[KEY, VAL] ):
_UpperCAmelCase = 42
_UpperCAmelCase = 42
class __SCREAMING_SNAKE_CASE( _Item ):
def __init__( self: List[Any] ) -> None:
super().__init__(UpperCamelCase , UpperCamelCase )
def __bool__( self: str ) -> bool:
return False
__UpperCamelCase : Any = _DeletedItem()
class __SCREAMING_SNAKE_CASE( MutableMapping[KEY, VAL] ):
def __init__( self: int , UpperCamelCase: int = 8 , UpperCamelCase: float = 0.75 ) -> None:
snake_case__ = initial_block_size
snake_case__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case__ = capacity_factor
snake_case__ = 0
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: KEY ) -> int:
return hash(UpperCamelCase ) % len(self._buckets )
def lowerCAmelCase_ ( self: str , UpperCamelCase: int ) -> int:
return (ind + 1) % len(self._buckets )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: int , UpperCamelCase: KEY , UpperCamelCase: VAL ) -> bool:
snake_case__ = self._buckets[ind]
if not stored:
snake_case__ = _Item(UpperCamelCase , UpperCamelCase )
self._len += 1
return True
elif stored.key == key:
snake_case__ = _Item(UpperCamelCase , UpperCamelCase )
return True
else:
return False
def lowerCAmelCase_ ( self: Tuple ) -> bool:
snake_case__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[int] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: int ) -> None:
snake_case__ = self._buckets
snake_case__ = [None] * new_size
snake_case__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCAmelCase_ ( self: Tuple ) -> None:
self._resize(len(self._buckets ) * 2 )
def lowerCAmelCase_ ( self: List[str] ) -> None:
self._resize(len(self._buckets ) // 2 )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: KEY ) -> Iterator[int]:
snake_case__ = self._get_bucket_index(UpperCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
snake_case__ = self._get_next_ind(UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: KEY , UpperCamelCase: VAL ) -> None:
for ind in self._iterate_buckets(UpperCamelCase ):
if self._try_set(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
break
def __setitem__( self: Optional[Any] , UpperCamelCase: KEY , UpperCamelCase: VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(UpperCamelCase , UpperCamelCase )
def __delitem__( self: Optional[Any] , UpperCamelCase: KEY ) -> None:
for ind in self._iterate_buckets(UpperCamelCase ):
snake_case__ = self._buckets[ind]
if item is None:
raise KeyError(UpperCamelCase )
if item is _deleted:
continue
if item.key == key:
snake_case__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self: Tuple , UpperCamelCase: KEY ) -> VAL:
for ind in self._iterate_buckets(UpperCamelCase ):
snake_case__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCamelCase )
def __len__( self: Dict ) -> int:
return self._len
def __iter__( self: List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self: Any ) -> str:
snake_case__ = ' ,'.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 328 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    # Placeholder test case for the fast (non-slow) suite: intentionally empty.
    # The real coverage for VersatileDiffusion runs only in the nightly GPU suite.
    pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Nightly GPU integration tests for the VersatileDiffusion pipeline.

    Fix: previously all three methods shared one name (so only the last
    survived, `tearDown` never ran and unittest discovered no `test_*`
    methods), and `UpperCamelCase` / `torch.floataa` were undefined names.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        # Saving then reloading the pipeline must not change its outputs.
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        # Exercise dual-guided, text-to-image and image-variation modes and
        # compare against reference output slices.
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 328 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A_ ( SeqaSeqTrainer ):
    """Seq2Seq trainer for question answering: post-processes generated predictions
    before computing metrics.

    Fix: the base class was previously the undefined name `__lowercase`
    (intended: the imported `SeqaSeqTrainer`), both public methods shared one
    name (`evaluate` shadowed by `predict`), and every local was assigned to
    `_UpperCAmelCase` while reads used the intended names — restored from the
    read sites.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-featurized) examples matching eval_dataset, used by post-processing.
        self.eval_examples = eval_examples
        # Callable(examples, dataset, output[, stage]) -> predictions for metrics.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run generation-based evaluation and return a metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the generation settings from the training args.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run generation on a test set and return a PredictionOutput with metrics."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 186 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict file.

    Fix: all three parameters were previously named `__A` (only the last
    binding survived) and the function name did not match its call site.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the RemBERT architecture.
        pytorch_dump_path: Destination file for `torch.save(state_dict)`.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the previous (obfuscated) name.
_lowerCamelCase = convert_rembert_tf_checkpoint_to_pytorch
if __name__ == "__main__":
    # Fix: `parser` and `args` were previously bound to an unrelated name,
    # so every reference below raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): expects `convert_rembert_tf_checkpoint_to_pytorch` to be defined above.
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 186 | 1 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is opened and closed in matching order.

    Non-bracket characters are ignored.

    Fix: both functions in this module were previously defined under the same
    name (the second shadowed the first) while the call sites used
    ``is_balanced`` / ``main`` — restored those names.
    """
    stack: list[str] = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (not stack or open_to_closed[stack.pop()] != ch):
            # Closing bracket with nothing open, or a mismatched pair.
            return False

    # Balanced only if every opener was consumed.
    return len(stack) == 0


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 226 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds tiny random Pegasus configs/inputs and checks cached decoding.

    Fix: previously the class was named `A` while later code calls
    `FlaxPegasusModelTester`, all methods shared one name (shadowing each
    other), and tuple unpacking assigned both targets to `_lowercase` while
    the bodies read `decoder_input_ids` / `decoder_attention_mask`.
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with random ids ending in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a cache must match decoding in one pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the provided mask with zeros out to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


# Backward-compatible alias for the previous (obfuscated) name.
A = FlaxPegasusModelTester
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the standard Pegasus model inputs, deriving masks from the pad token.

    Fix: this function was defined under an obfuscated name while being called
    as ``prepare_pegasus_inputs_dict`` elsewhere in the file, and used the
    undefined dtype ``np.inta`` (intended: ``np.int8``).

    Args:
        config: Any object exposing a ``pad_token_id`` attribute.
        input_ids / decoder_input_ids: int arrays of shape (batch, seq).
        attention_mask / decoder_attention_mask: optional precomputed masks;
            when None they mask out pad tokens (the first decoder position is
            always attended to, since it holds the decoder start token).

    Returns:
        dict with keys ``input_ids``, ``decoder_input_ids``,
        ``attention_mask`` and ``decoder_attention_mask``.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class A ( lowerCamelCase_ , unittest.TestCase ):
    # NOTE(review): identifiers in this class look machine-mangled — every
    # class attribute is named ``lowerCamelCase`` (each assignment overwrites
    # the previous one), every test method is named ``UpperCAmelCase__`` (so
    # only the last def survives), tuple unpacks bind both elements to the
    # same ``_lowercase`` name, and many calls pass the undefined name
    # ``_UpperCamelCase``.  The original per-attribute / per-method names must
    # be restored before this suite can run; verify against the upstream
    # FlaxPegasus test module.
    '''Model-tester driven unit tests for the Flax Pegasus models.'''
    lowerCamelCase : Dict = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    lowerCamelCase : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    lowerCamelCase : Tuple = True
    lowerCamelCase : List[str] = False
    lowerCamelCase : int = False
    lowerCamelCase : Dict = False
    # setUp: create the model tester and a ConfigTester.
    def UpperCAmelCase__ ( self : List[Any]):
        _lowercase: Any = FlaxPegasusModelTester(self)
        _lowercase: Optional[int] = ConfigTester(self , config_class=_UpperCamelCase)
    # Run the shared config sanity checks.
    def UpperCAmelCase__ ( self : str):
        self.config_tester.run_common_tests()
    # Cache-forward equivalence, without an attention mask.
    def UpperCAmelCase__ ( self : Tuple):
        _lowercase , _lowercase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
    # Cache-forward equivalence, with an attention mask.
    def UpperCAmelCase__ ( self : Tuple):
        _lowercase , _lowercase: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
    # Encoder output shapes must match with and without jax.jit.
    def UpperCAmelCase__ ( self : Optional[int]):
        _lowercase , _lowercase: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                _lowercase: Union[str, Any] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase)
                _lowercase: Union[str, Any] = model_class(_UpperCamelCase)
                @jax.jit
                def encode_jitted(_UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=None , **_UpperCamelCase : int):
                    return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase)
                with self.subTest("JIT Enabled"):
                    _lowercase: Tuple = encode_jitted(**_UpperCamelCase).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        _lowercase: Tuple = encode_jitted(**_UpperCamelCase).to_tuple()
                self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase))
                for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase):
                    self.assertEqual(jitted_output.shape , output.shape)
    # Decoder output shapes must match with and without jax.jit.
    def UpperCAmelCase__ ( self : Any):
        _lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                _lowercase: Union[str, Any] = model_class(_UpperCamelCase)
                _lowercase: Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
                _lowercase: int = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(_UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict):
                    return model.decode(
                        decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
                with self.subTest("JIT Enabled"):
                    _lowercase: Dict = decode_jitted(**_UpperCamelCase).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        _lowercase: Union[str, Any] = decode_jitted(**_UpperCamelCase).to_tuple()
                self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase))
                for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase):
                    self.assertEqual(jitted_output.shape , output.shape)
    # Smoke test: load from PyTorch weights and run a trivial forward pass.
    @slow
    def UpperCAmelCase__ ( self : str):
        for model_class_name in self.all_model_classes:
            _lowercase: Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_UpperCamelCase)
            _lowercase: Union[str, Any] = np.ones((1, 1))
            _lowercase: List[str] = model(_UpperCamelCase)
            self.assertIsNotNone(_UpperCamelCase)
    # Integration test: summarize two articles and compare to reference text.
    @slow
    def UpperCAmelCase__ ( self : Optional[Any]):
        _lowercase: Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        _lowercase: int = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        _lowercase: int = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]
        _lowercase: str = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        _lowercase: Any = tokenizer(_UpperCamelCase , return_tensors="np" , truncation=_UpperCamelCase , max_length=512 , padding=_UpperCamelCase)
        _lowercase: Union[str, Any] = model.generate(**_UpperCamelCase , num_beams=2).sequences
        _lowercase: Optional[int] = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase)
        assert tgt_text == decoded
| 226 | 1 |
'''simple docstring'''
import os
def solution():
    """Project Euler 11: greatest product of four adjacent numbers in the grid.

    Reads the 20x20 grid from ``grid.txt`` next to this file and returns the
    largest product of four adjacent values in any direction.

    NOTE(review): the original passed the function object itself to
    ``os.path.dirname`` (a TypeError) and the ``__main__`` guard called the
    undefined name ``solution``; both are fixed here.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])
    return largest_product_of_four(grid)


def largest_product_of_four(grid):
    """Return the largest product of four adjacent entries of ``grid``.

    Adjacency is horizontal, vertical, or along either diagonal.  Works for
    any rectangular grid of non-negative integers (returns 0 when no run of
    four fits).
    """
    n_rows = len(grid)
    n_cols = len(grid[0]) if grid else 0
    maximum = 0
    for i in range(n_rows):
        for j in range(n_cols):
            if j + 3 < n_cols:  # right
                maximum = max(maximum, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3])
            if i + 3 < n_rows:  # down
                maximum = max(maximum, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j])
            if i + 3 < n_rows and j + 3 < n_cols:  # diagonal down-right
                maximum = max(maximum, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3])
            if i + 3 < n_rows and j - 3 >= 0:  # diagonal down-left
                maximum = max(maximum, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3])
    return maximum


if __name__ == "__main__":
    print(solution())
| 667 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Mapping from T5X/Flax parameter-name fragments to their Hugging Face
# SwitchTransformers counterparts, applied by rename_keys() below.
# It deliberately does not include renames already done by `from_pt`.
# NOTE(review): renamed from the opaque ``__lowerCamelCase`` to the name the
# rename function actually reads.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename flattened T5X parameter keys in-place to HF SwitchTransformers names.

    NOTE(review): every assignment in the original was mangled to ``A_ = ...``
    while later statements read names such as ``keys``/``new_key``/``groups``;
    the variable names are reconstructed from those read sites, and the
    function is renamed to match its caller below.
    """
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x}
    # in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            # Feed-forward sits at sub-layer index 1 in the encoder and 2 in
            # the decoder (after cross-attention).
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # Relative attention biases are stored transposed in the checkpoint.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one entry per expert.
    # NOTE(review): the original loop body assigned every result to ``A_`` and
    # printed the literal "nested fstring"; reconstructed per-expert renaming
    # below — confirm the "experts/expert_{idx}/" target against the upstream
    # conversion script.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_expert_key] = expert_weights[idx]
                print(f"{key} -> {new_expert_key}")
            s_dict.pop(key)

    return s_dict
# Mapping from gin-config hyperparameter names to SwitchTransformersConfig
# keyword arguments, used by convert_gin_to_config() below.
# NOTE(review): renamed from the opaque ``__lowerCamelCase`` (which also
# shadowed the rename mapping above) to the name the converter reads.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a Google gin config file into a ``SwitchTransformersConfig``.

    NOTE(review): local names were mangled to ``A_``; they are reconstructed
    from the names read later in the body (``regex_match``, ``args``,
    ``activation``), and the function is renamed to match its caller below.
    """
    # Convert a google style config to the hugging face format.
    import regex as re  # third-party `regex` package, as in the original

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Values with a decimal point are floats, the rest are ints.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    # The activation entry is a quoted tuple, so it needs its own pattern.
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X/Flax SwitchTransformers checkpoint into a saved PyTorch model.

    NOTE(review): local names were mangled to ``A_`` in the original; they are
    reconstructed from the read sites, and the function is renamed to match
    the call in the ``__main__`` block below.  The module-level import reads
    ``from tax import checkpoints`` — presumably the ``t5x`` package; verify.
    """
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # argparse exposes `--switch_t5x_checkpoint_path` as
    # `args.switch_t5x_checkpoint_path`; the original read the non-existent
    # `args.switch_tax_checkpoint_path`, which raised AttributeError.
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 667 | 1 |
from typing import Any
class Node:
    """A singly-linked-list node holding ``data`` and a ``next`` pointer.

    NOTE(review): renamed from the opaque ``lowercase`` — the list class below
    instantiates it as ``Node``.
    """

    def __init__(self, data: Any):
        self.data = data      # payload carried by this node
        self.next = None      # link to the following node, or None at the tail
class LinkedList:
    """A minimal singly linked list supporting push, print, and data swap.

    NOTE(review): renamed from the opaque ``lowercase`` (its three methods
    were all named ``lowercase_``); the new names match the calls in the
    ``__main__`` block below.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print node values separated by spaces, terminated by a newline."""
        cursor = self.head
        while cursor is not None:
            print(cursor.data, end=" ")
            cursor = cursor.next
        print()

    def push(self, new_data):
        """Insert a new node carrying ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding the two given values.

        No-op when the values are equal or either value is absent.

        NOTE(review): the original located both nodes into the same mangled
        variable and swapped a value with itself; reconstructed as a proper
        two-node swap.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 (push prepends, so push in reverse).
    ll = LinkedList()  # was bound to `A__` while the lines below read `ll`
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 233 |
import logging
from transformers.configuration_utils import PretrainedConfig
A__ : Tuple = logging.getLogger(__name__)
class lowercase(PretrainedConfig):
    """Configuration for a masked ("prunable") BERT model.

    Mirrors the standard BERT configuration and adds the pruning-specific
    fields ``pruning_method``, ``mask_init`` and ``mask_scale``.

    NOTE(review): the original subclassed the undefined name
    ``__UpperCamelCase`` — ``PretrainedConfig`` (imported above) is the only
    plausible base — and declared sixteen constructor parameters that all
    shared one name (a SyntaxError); parameter names are reconstructed from
    the attribute assignments in the body.
    """

    # Model-type identifier; was the opaque class attribute ``__a``.
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 233 | 1 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float):
    """Reflect a ray off the ellipse 4x^2 + y^2 = 100 at (point_x, point_y).

    Returns ``(next_x, next_y, outgoing_gradient)`` — the next intersection of
    the reflected ray with the ellipse and the reflected ray's gradient.

    NOTE(review): the original named both source functions ``_a`` and all
    intermediates the same placeholder; names are reconstructed from read
    sites, and the functions are renamed to match their callers.
    """
    normal_gradient = point_y / 4 / point_x
    # sin/cos of twice the normal's angle, used to mirror the incoming ray.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Project Euler 144: count reflections of the beam inside the white cell.

    The beam enters at (0.0, 10.1) towards ``(first_x_coord, first_y_coord)``
    and bounces until it exits through the hole at the top
    (-0.01 <= x <= 0.01, y > 0).
    """
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 106 |
# Size of the hashing alphabet (extended ASCII code points).
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str, alphabet_size: int = 256, modulus: int = 1_000_003) -> bool:
    """Return True iff ``pattern`` occurs as a substring of ``text``.

    Uses the Rabin-Karp rolling hash: compare hashes first and fall back to a
    direct string comparison only on a hash hit, so false positives from hash
    collisions are filtered out.

    ``alphabet_size``/``modulus`` default to the module-level constants and
    are now parameters so the hash base can be tuned; the two-argument call
    form behaves exactly as before.

    NOTE(review): the original assigned every local to a placeholder name
    while reading ``p_len``/``p_hash``/... — names reconstructed from those
    read sites, and the function renamed to match its callers below.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp():
    """Self-test covering matches, misses, and non-ASCII input.

    NOTE(review): locals were mangled (assigned to one placeholder, read as
    undefined names); reconstructed, and renamed to match the ``__main__``
    call below.
    """
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII characters hash correctly too
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 106 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _a ( unittest.TestCase ):
    # NOTE(review): identifiers here look machine-mangled — every class
    # attribute is assigned to ``A_`` (each overwriting the previous) while
    # the methods read ``mod_file``/``config_folder``/``config_path``/... ,
    # all methods share the name ``_UpperCAmelCase`` (only the last survives),
    # and this class name is shadowed by the next class, so unittest discovery
    # will never see it.  Also, ``_UpperCAmelCase`` inside subTest below is an
    # undefined name — presumably the loop variable ``config``; verify against
    # the upstream accelerate CLI test module.
    """Tests for the `accelerate launch` CLI entry point."""
    A_ = inspect.getfile(accelerate.test_utils )
    A_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
    A_ = ["""accelerate""", """launch"""]
    A_ = Path.home() / """.cache/huggingface/accelerate"""
    A_ = """default_config.yaml"""
    A_ = config_folder / config_file
    A_ = config_folder / """_default_config.yaml"""
    A_ = Path("""tests/test_configs""" )
    # Move any user config out of the way before the tests run.
    @classmethod
    def _UpperCAmelCase ( cls ) -> Dict:
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )
    # Restore the user config afterwards.
    @classmethod
    def _UpperCAmelCase ( cls ) -> int:
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    # Launch with no config file (optionally multi-GPU).
    def _UpperCAmelCase ( self ) -> Tuple:
        UpperCamelCase_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    # Launch once per YAML config fixture.
    def _UpperCAmelCase ( self ) -> List[Any]:
        for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
            with self.subTest(config_file=_UpperCAmelCase ):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
    # Smoke-test `accelerate test`.
    def _UpperCAmelCase ( self ) -> List[Any]:
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class _a ( unittest.TestCase ):
    # NOTE(review): identifiers here look machine-mangled — every class
    # attribute is assigned to ``A_`` while the methods read
    # ``tpu_name``/``tpu_zone``/``command``/... , every method shares the name
    # ``_UpperCAmelCase`` (only the last survives), and ``_UpperCAmelCase`` in
    # the bodies is an undefined name used both where a literal ``True``
    # (``return_stdout=...``) and where the captured ``output`` is expected.
    # Verify against the upstream accelerate `tpu-config` test module.
    """Tests for the `accelerate tpu-config` CLI command output."""
    A_ = """test-tpu"""
    A_ = """us-central1-a"""
    A_ = """ls"""
    A_ = ["""accelerate""", """tpu-config"""]
    A_ = """cd /usr/share"""
    A_ = """tests/test_samples/test_command_file.sh"""
    A_ = """Running gcloud compute tpus tpu-vm ssh"""
    # Single command passed entirely on the CLI.
    def _UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
    # Command on the CLI plus an explicit (older) config file.
    def _UpperCAmelCase ( self ) -> Any:
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
    # Commands taken from the latest config file only.
    def _UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
    # CLI command overrides the config file's commands.
    def _UpperCAmelCase ( self ) -> int:
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _UpperCAmelCase , )
    # Multiple --command flags are concatenated in order.
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _UpperCAmelCase , )
    # Commands read from a command file.
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        UpperCamelCase_ = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
    # Command file combined with an explicit (older) config file.
    def _UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
    # --install_accelerate prepends a pip install of the latest accelerate.
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        UpperCamelCase_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
    # --accelerate_version pins the installed accelerate version.
    def _UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=_UpperCAmelCase , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _UpperCAmelCase , )
| 23 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal (logical XNOR), else 0.

    NOTE(review): the original declared two parameters sharing one name (a
    SyntaxError) and compared an undefined name with itself; reconstructed as
    the obvious two-input comparison, and renamed to match the callers below.
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively check the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 610 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
A__: str = logging.getLogger()
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
_a : Optional[int] =argparse.ArgumentParser()
parser.add_argument("""-f""" )
_a : Union[str, Any] =parser.parse_args()
return args.f
def get_results(output_dir):
    """Load and return the ``all_results.json`` metrics dict from *output_dir*.

    Raises:
        ValueError: when ``all_results.json`` does not exist there.

    NOTE(review): locals were mangled (assigned to ``_a``, read as
    ``path``/``results``); reconstructed, and renamed to match the
    ``get_results(...)`` calls in the test class below.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    """Return True when tests run on a CUDA device and NVIDIA apex is importable.

    Relies on the module-level ``torch_device`` / ``is_apex_available``
    imported from ``transformers.testing_utils`` above.

    NOTE(review): renamed from the third of three defs sharing the name
    ``SCREAMING_SNAKE_CASE_``; this is the name the test class calls.
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Mirror log records to stdout so test harnesses capture them.
stream_handler = logging.StreamHandler(sys.stdout)  # was bound to `A__` while the next line reads `stream_handler`
logger.addHandler(stream_handler)
class A__ ( __snake_case ):
    # NOTE(review): identifiers in this class look machine-mangled — results
    # are assigned to ``_a`` while later lines read ``tmp_dir``/``testargs``/
    # ``result``, all methods share the name ``__UpperCAmelCase`` (only the
    # last survives), and ``__UpperCamelCase`` in the bodies is an undefined
    # name standing in for ``tmp_dir``.  Verify against the upstream
    # transformers no-trainer examples test module.
    @classmethod
    def __UpperCAmelCase ( cls :List[str] ) -> Optional[int]:
        '''Create a temporary accelerate config and the base launch command.'''
        _a : str =tempfile.mkdtemp()
        _a : List[Any] =os.path.join(cls.tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        _a : int =["""accelerate""", """launch""", """--config_file""", cls.configPath]
    @classmethod
    def __UpperCAmelCase ( cls :List[str] ) -> List[Any]:
        '''Remove the temporary config directory.'''
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def __UpperCAmelCase ( self :List[Any] ) -> Dict:
        '''Run run_glue_no_trainer end-to-end and check accuracy and checkpoints.'''
        _a : Any =self.get_auto_remove_tmp_dir()
        _a : str =f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )
        run_command(self._launch_args + testargs )
        _a : Any =get_results(__UpperCamelCase )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """glue_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def __UpperCAmelCase ( self :int ) -> Optional[Any]:
        '''Run run_clm_no_trainer and check perplexity and checkpoints.'''
        _a : Union[str, Any] =self.get_auto_remove_tmp_dir()
        _a : Optional[int] =f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        _a : Tuple =get_results(__UpperCamelCase )
        self.assertLess(result["""perplexity"""] , 1_0_0 )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """clm_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def __UpperCAmelCase ( self :Union[str, Any] ) -> List[str]:
        '''Run run_mlm_no_trainer and check perplexity and checkpoints.'''
        _a : Tuple =self.get_auto_remove_tmp_dir()
        _a : Optional[Any] =f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs )
        _a : int =get_results(__UpperCamelCase )
        self.assertLess(result["""perplexity"""] , 4_2 )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """mlm_no_trainer""" ) ) )
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def __UpperCAmelCase ( self :Dict ) -> int:
        '''Run run_ner_no_trainer and check accuracy, loss, and checkpoints.'''
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        _a : Any =7 if get_gpu_count() > 1 else 2
        _a : List[str] =self.get_auto_remove_tmp_dir()
        _a : str =f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs )
        _a : List[str] =get_results(__UpperCamelCase )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertLess(result["""train_loss"""] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.get_auto_remove_tmp_dir()
_a : List[Any] =f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
_a : Union[str, Any] =get_results(__UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 2_8 )
self.assertGreaterEqual(result["""eval_exact"""] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
_a : Union[str, Any] =self.get_auto_remove_tmp_dir()
_a : Tuple =f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
_a : Tuple =get_results(__UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : int =self.get_auto_remove_tmp_dir()
_a : Dict =f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
_a : int =get_results(__UpperCamelCase )
self.assertGreaterEqual(result["""eval_rouge1"""] , 1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : Optional[int] =self.get_auto_remove_tmp_dir()
_a : Dict =f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
_a : List[Any] =get_results(__UpperCamelCase )
self.assertGreaterEqual(result["""eval_bleu"""] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """translation_no_trainer""" ) ) )
@slow
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
_a : Optional[int] =logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCamelCase )
_a : str =self.get_auto_remove_tmp_dir()
_a : Optional[Any] =f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
_a : List[str] =get_results(__UpperCamelCase )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[int] =self.get_auto_remove_tmp_dir()
_a : str =f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
_a : Union[str, Any] =get_results(__UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , """image_classification_no_trainer""" ) ) )
| 702 |
"""Lazily-initialized public API for the MT5 model family.

Fixes the clobbered module: every top-level name was assigned to the same
variable, leaving `_import_structure` and the tokenizer aliases undefined at
the point of use.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 has no tokenizer of its own: it reuses the T5 tokenizers. Fall back to
# the dummy placeholder objects when the optional backend is missing so the
# aliases below are always importable.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    # Replace this module with a lazy proxy; the tokenizer aliases are eagerly
    # bound via extra_objects because they live in the T5 package.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
"""
Count token-id occurrences in a binarized dataset, to smooth the MLM masking
probabilities (cf. XLM/word2vec). Restores the locals (`args`, `data`,
`counter`, `counts`) that the original left undefined.
"""
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-id count vector indexed by token id (ids absent from the data stay 0).
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
"""Lazily-initialized public API for the WavLM model (PyTorch only).

Restores `_import_structure`, which the original assigned to throwaway names,
leaving it undefined when passed to `_LazyModule`.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_wavlm'] = [
        'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WavLMForAudioFrameClassification',
        'WavLMForCTC',
        'WavLMForSequenceClassification',
        'WavLMForXVector',
        'WavLMModel',
        'WavLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy model code loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations

# All complete solutions found by solve(); each entry is an n x n 0/1 board.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks square (row, column).

    Only rows above *row* can hold queens (the solver fills top-down), so it is
    enough to scan the row, the column, and the two upward diagonals.
    """
    size = len(board)
    for i in range(size):
        if board[row][i] == 1 or board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, size)):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking search: place one queen per row from *row* downward.

    Every complete placement is copied into the global ``solution`` list and
    printed. (Copying fixes the original bug of appending the live board,
    which is zeroed again while backtracking.)
    """
    if row >= len(board):
        solution.append([board_row[:] for board_row in board])
        printboard(board)
        print()
        return True
    for column in range(len(board)):
        if is_safe(board, row, column):
            board[row][column] = 1
            solve(board, row + 1)
            board[row][column] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print the board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
"""Lazily-initialized public API for PEGASUS-X (PyTorch only).

Restores `_import_structure`, which the original clobbered into a throwaway
name and then referenced undefined in the `_LazyModule` call.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy model code loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# Doomsday month anchors: day-of-month (mod 7) that falls on the year's
# "doomsday" for each month, leap and non-leap years respectively.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2000, 1, 1)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Gregorian leap rule: divisible by 4, except century years not divisible
    # by 400 (the original used `== 0` here, misclassifying e.g. year 2000).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """Builds a tiny GPT-J config plus random inputs, and provides the shared
    cache-consistency checks used by the Flax GPT-J tests.

    Restores the real class/parameter/local names the obfuscation destroyed
    (the original signature repeated one parameter name, a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # All special tokens share the last vocab id in this tiny config.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) for a tiny GPT-J model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Verify incremental decoding with init_cache matches a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as check_use_cache_forward, but with an explicit padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        # Extend the mask with zeros up to the cache length.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """Flax GPT-J test suite: cache consistency, generation, and PT<->Flax equivalence.

    Restores mixin bases, distinct test-method names, and locals that the
    obfuscation collapsed into undefined single names.
    """

    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        """Greedy batched generation with left padding must match the reference strings."""
        # NOTE: GPTaTokenizer is this file's (obfuscated) import of the GPT-2 tokenizer.
        tokenizer = GPTaTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        """Weights converted PT -> Flax must produce (almost) identical outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                # Randomize the padding prefix per sample so both frameworks see it.
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        """Weights converted Flax -> PT must produce (almost) identical outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
from __future__ import annotations
import numpy as np
def a__ ( _UpperCamelCase : list[float] ):
return np.maximum(0 ,_A )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 709 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
    # Placeholder for fast (non-nightly) VersatileDiffusion tests; none are
    # currently defined — only the GPU integration suite below runs.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests for the VersatileDiffusion pipeline.

    Restores distinct method names (the obfuscated file named every method
    identically, so tearDown/tests never ran) and the locals that were left
    undefined.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Saving and reloading the pipeline must reproduce the same dual-guided output."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        # disable=None keeps diffusers' default progress-bar behavior.
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Smoke-test dual_guided, text_to_image and image_variation against reference slices."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAIGPT configs and inputs for the model tests below.

    Renamed from a corrupted placeholder: the test class's setUp already
    instantiates ``OpenAIGPTModelTester(self)`` and calls the real method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # last vocab id doubles as the padding token
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *label tensors)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        # exercise the three legal calling conventions; only the last result is checked
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the OpenAI GPT model family."""

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip pipeline tests that cannot run with this model/tokenizer combination."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra label tensors the double-heads model needs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Integration test: greedy generation from the pretrained openai-gpt checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        # greedy decoding (do_sample=False) so the output is deterministic
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 75 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale.
# (The stale-bot function below reads this name.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main() -> None:
    """Close or mark as stale inactive issues on huggingface/transformers.

    Renamed from a corrupted placeholder so the ``__main__`` guard's ``main()``
    call resolves; also fixes the sort key lambda, which referenced an
    undefined name.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # already warned by the bot and still inactive -> close
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # long-inactive but not yet warned -> leave the stale notice
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored." )


if __name__ == "__main__":
    main()
| 75 | 1 |
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even (brick) sort and return it.

    Bug fix: the corrupted version assigned the swap tuple to a throwaway
    local instead of swapping the two list elements, so nothing was ever
    sorted (and the loop never terminated for unsorted input).
    """
    is_sorted = False
    while not is_sorted:  # keep sweeping until a full pass makes no swap
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # even-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # odd-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 718 |
A_ :str = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    """Linear congruential pseudo-random number generator.

    Renamed from a corrupted placeholder: the ``__main__`` demo below already
    instantiates ``LinearCongruentialGenerator`` and calls ``next_number()``.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """Store the LCG parameters; the default seed is the wall-clock time at import."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator state and return the next pseudo-random number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action: prints numbers forever (Ctrl-C to stop).
    # Bug fix: the instance was previously bound to a throwaway name while the
    # loop read an undefined `lcg`.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 154 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict):
    """Round-trip a SplitDict through its YAML list representation.

    Fixes: parameter name must match the ``parametrize`` argname for pytest to
    inject the fixture; the length assertion compared a value with itself; the
    per-split normalization assigned to locals instead of the SplitInfo objects.
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """`asdict` keeps the (deprecated) dataset_name field in the dump.

    Fixes: the parametrize list referenced an undefined name (now ``None``),
    and the parameter name must match the ``parametrize`` argname.
    """
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 43 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
# Module logger — the safety-checker class below calls `logger.warning`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE_(PreTrainedModel):
    """CLIP-based safety checker: flags NSFW / watermarked images and blanks them.

    Restored from a corrupted version that bound every submodule to a local
    name (so the heads were never registered) and never defined ``forward``.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # one scalar logit per image for each detector head
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Return (images, nsfw_flags, watermark_flags); flagged images are zeroed."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 278 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BlenderbotSmallTokenizer (tiny fixture vocab + real checkpoint)."""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: both module constants were bound to the same name, so the second
# assignment silently shadowed the logger.
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class snake_case__(PretrainedConfig):
    """Configuration for GPT-NeoX Japanese models.

    Restored from a corrupted version with duplicate parameter names (a
    SyntaxError) and local assignments that never reached ``self``.
    """

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K) — the function below reads this name


def SCREAMING_SNAKE_CASE(donor_conc, acceptor_conc, intrinsic_conc) -> float:
    """Return the built-in voltage (V) of a p-n junction at temperature ``T``.

    V_bi = (kT / q) * ln(N_d * N_a / n_i^2)

    Raises ValueError for non-positive concentrations or when either doping
    concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 191 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase(CLIPPreTrainedModel):
    """CLIP image encoder + mapper used by the Paint-by-Example pipeline.

    Restored from a corrupted version that bound submodules to locals (so
    they were never registered) and never defined ``forward``.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode images to latent conditioning states; optionally also return the uncond vector."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small transformer stack mapping CLIP pooled output to conditioning states.

    Renamed from a corrupted placeholder: the encoder class above already
    instantiates ``PaintByExampleMapper(config)``.
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # NOTE(review): head dim restored as hid_size // num_heads — confirm against upstream
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ] )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 491 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Smoke tests for the Tatoeba -> Marian converter."""

    @cached_property
    def resolver(self):
        # fresh temp dir per test run; cached so both tests share one converter
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 716 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny TF ResNet configs and inputs for the model tests below.

    Renamed from a corrupted placeholder: the test class's setUp already
    instantiates ``TFResNetModelTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the TF ResNet model family."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ResNet config has no text-model properties to verify
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(__UpperCamelCase )
def prepare_img() -> "Image.Image":
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from a corrupted placeholder: the integration test below calls
    ``prepare_img()``; the return annotation was also wrong (``List[str]``).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Integration test against the pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 129 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """Builds the config dict used by the ImageGPT image-processor tests.

    Fixes: the original ``__init__`` declared every parameter with the same
    mangled name (a SyntaxError) and assigned each value to a throwaway local,
    so ``self.do_resize`` / ``self.size`` / ``self.do_normalize`` — read by
    ``prepare_image_processor_dict`` — were never set.  The class is renamed to
    match how the test class instantiates it.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        # Fall back to the default resize target when no size is supplied.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """Return the kwargs dict an ImageGPTImageProcessor is built from."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ImageGPTImageProcessor: attributes, kwargs overrides, and
    JSON / save_pretrained serialization round-trips (clusters compared with
    ``np.array_equal`` since they are numpy arrays).

    Fixes vs. original: the base class was the undefined ``__lowerCamelCase``
    (the file imports ``ImageProcessingSavingTestMixin`` for exactly this),
    ``setUp`` discarded the tester instead of assigning
    ``self.image_processor_tester``, the round-trip loops referenced the
    undefined name ``_lowerCAmelCase`` instead of ``value``, and every method
    shared one mangled name so only the last definition survived.
    """

    # The mixin reads ``image_processing_class``; None when vision extras are missing.
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    """Load two fixture images from the hub test dataset.

    Fixes: the original discarded each loaded image into a throwaway local and
    returned the undefined names ``imagea``; it was also named
    ``_lowerCamelCase`` although the integration test calls ``prepare_images()``.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """Integration test: run the pretrained ImageGPT image processor on real
    images and compare the produced token ids against reference values.

    Fixes: the original assigned every intermediate to a throwaway local, then
    referenced the undefined names ``image_processing`` and ``_lowerCAmelCase``;
    the class also shared its mangled name with two earlier classes in the file.
    """

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 79 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT (slow and fast tokenizers).

    Fixes vs. original: the base class was the undefined ``__lowerCamelCase``
    (``TokenizerTesterMixin`` is imported for exactly this); the class
    attributes were all named ``__lowerCamelCase`` although the methods read
    ``self.tokenizer_class`` / ``self.rust_tokenizer_class`` /
    ``self.pre_trained_model_path`` etc.; every method shared one mangled name
    so only the last definition survived; and locals were discarded into
    throwaway names and then referenced by their intended names (NameErrors).
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Default strip_accents follows do_lower_case.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                # NOTE(review): the mangled source only showed a bare ``True`` here;
                # setting ``tokenize_chinese_chars`` is the behavior the assertions
                # below exercise — confirm against the upstream test.
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 79 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of *vector*.

    Fixes: both functions in this module were named ``__UpperCAmelCase`` (the
    second silently shadowed the first) while the second one called the
    undefined name ``sigmoid`` — this restores the intended name.
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 277 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for this example script.
# NOTE(review): both assignments bind the same name ``A``, so the first value
# (16) is immediately overwritten by 32 — presumably these were two distinct
# constants (e.g. a max train batch size and an eval batch size); confirm intent.
A = 16
A = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE/MRPC train and validation dataloaders.

    Fixes: the original declared both parameters with the same name ``__A``
    (a SyntaxError), referenced the undefined ``accelerator``, and discarded
    every intermediate into a throwaway local; it is called below as
    ``get_dataloaders(accelerator, batch_size)``.

    Args:
        accelerator: the ``Accelerator`` whose distributed/mixed-precision
            settings drive padding decisions.
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        # A is the module-level eval batch size (32); see note at its definition.
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=A
    )

    return train_dataloader, eval_dataloader
# For testing only: swap the real dataloader factory for the mocked one.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Fixes: the override was assigned to the throwaway name ``A`` instead of
    # redefining the factory — the ``# noqa: F811`` (redefinition) marker shows
    # the intent was to shadow get_dataloaders.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with Accelerate gradient accumulation.

    Fixes: the original declared both parameters with the same name ``__A``
    (a SyntaxError) and discarded every intermediate into a throwaway local
    while referencing the intended names (``gradient_accumulation_steps``,
    ``model``, ``loss``, …); it is called below as
    ``training_function(config, args)``.

    Args:
        config: hyper-parameter dict with ``lr``, ``num_epochs``, ``seed``,
            ``batch_size``.
        args: parsed CLI namespace (``mixed_precision``, ``cpu``,
            ``gradient_accumulation_steps``).
    """
    # For testing only: shrink the run when using mocked dataloaders.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    """Parse CLI arguments and launch training.

    Fixes: the function was named ``__UpperCAmelCase`` although the entry
    guard calls ``main()``, the parsed args/config were discarded into
    throwaway locals, and the final call passed the undefined name ``__A``.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 277 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the XLNet subpackage: maps each submodule to the
# public names it exports, gated on the optional dependencies being installed.
#
# Fixes: the original flattened every dict-key assignment into a bare
# ``__UpperCAmelCase = [...]`` so ``_import_structure`` — required by the
# ``_LazyModule`` call at the bottom — was never built, and the lazy module was
# assigned to a throwaway variable instead of being installed in ``sys.modules``
# (which is what the ``import sys`` in the else-branch exists for).
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 584 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point with main, then list the files changed since then.
# Fixes: every assignment targeted the same throwaway name ``lowercase__``
# while the following lines referenced the intended names (``fork_point_sha``,
# ``modified_files``, ``joined_dirs``, ``regex``, ``relevant_modified_files``),
# all of which were undefined — the correct names are recovered from their uses.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Keep only modified .py files under the top-level dirs given as CLI arguments.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is fed directly into Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 263 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (UpperCamelCase__ , unittest.TestCase ):
    """Tokenizer tests for MgpstrTokenizer (character-level OCR vocabulary)."""

    # NOTE(review): TokenizerTesterMixin normally consumes these as
    # `tokenizer_class`, `test_rust_tokenizer`, `from_pretrained_kwargs`,
    # `test_seq2seq` — confirm the expected attribute names before renaming.
    __a : str = MgpstrTokenizer
    __a : List[str] = False
    __a : int = {}
    __a : Any = False

    def setUp(self) -> None:
        """Write a tiny character vocabulary to disk for the tests to load."""
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        """Load a MgpstrTokenizer from the temp vocabulary written in setUp."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected decoded text) pair for consistency tests."""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        """An added special token must encode to one id and be dropped on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        """tokenize -> ids -> decode must round-trip (up to whitespace)."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(" ", ""), output_text)

    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_sequences(self):
        pass
| 710 |
'''simple docstring'''
def lowerCamelCase_ ( max_base: int = 10, max_power: int = 22 ) -> int:
    """Project Euler 63: count positive n-digit integers that are also an nth power.

    Args:
        max_base: bases 1 .. max_base - 1 are considered.
        max_power: exponents 1 .. max_power - 1 are considered.

    Returns:
        Number of (base, power) pairs where base**power has exactly `power` digits.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    # The original guard called an undefined name `solution`; call the
    # function actually defined in this module.
    print(f'''{lowerCamelCase_(10, 22) = }''')
| 644 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Declarative description of this package's public symbols; consumed by
# _LazyModule so heavy submodules are only imported on first attribute access.
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; names must match _import_structure.
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 | """simple docstring"""
def SCREAMING_SNAKE_CASE ( length : int = 50 ) -> int:
    """Project Euler 117: count the tilings of a row of `length` unit squares
    using black unit tiles plus red (2), green (3) and blue (4) oblong tiles.

    ways_number[n] starts at 1 (the all-black tiling); each (tile_start,
    tile_length) pair places the first oblong tile, and every shorter prefix
    count is reused, giving the recurrence f(n) = f(n-1)+f(n-2)+f(n-3)+f(n-4).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    # The original guard called an undefined name `solution`.
    print(f"""{SCREAMING_SNAKE_CASE() = }""")
| 213 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch kernels so the pixel-slice comparisons are stable.
enable_full_determinism()


class __magic_name__ (PipelineTesterMixin ,unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for the Kandinsky 2.2 controlnet img2img pipeline."""

    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        # NOTE(review): not referenced in this chunk; name follows the
        # upstream diffusers test — confirm.
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00

    @property
    def dummy_unet(self):
        """Tiny UNet with the image_hint conditioning used by the controlnet pipeline."""
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the tiny unet / DDIM scheduler / movq trio the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic embeddings, init image and hint for one pipeline call."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((2_56, 2_56))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith('mps'):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        """Output shape and a fixed pixel slice must match the recorded reference."""
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
    """Slow GPU integration test against the released Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        init_image = init_image.resize((5_12, 5_12))

        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        # Hint image -> float tensor in [0, 1], channels-first with batch dim.
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = 'A robot, 4k photo'

        # NOTE(review): `torch.floataa` in the original is not a torch dtype;
        # fp16 matches the fp16 reference image loaded above.
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='', ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_00, height=5_12, width=5_12, strength=0.5, output_type='np', )

        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)

        assert_mean_pixel_difference(image, expected_image)
| 208 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase__ : Tuple = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    """Prompt for the compute environment and return the filled-in config object.

    Dispatches to the SageMaker or cluster questionnaire depending on the answer.
    (Renamed from `SCREAMING_SNAKE_CASE`: the call site below already uses
    `get_user_input()`.)
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build the argument parser for `accelerate config`.

    Args:
        subparsers: optional argparse subparsers object; when given, `config`
            is registered as a subcommand, otherwise a standalone parser is built.

    Returns:
        The configured `argparse.ArgumentParser`.
    """
    parser_description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=parser_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=parser_description )

    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )

    if subparsers is not None:
        # Subcommand dispatch: run config_command(args) when `config` is invoked.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and persist the resulting config file.

    Writes to `args.config_file` when given, otherwise to the default YAML
    location (creating the cache directory if needed). JSON vs YAML is chosen
    by the target file's extension.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""" )
def main():
    """CLI entry point: parse the command line and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 208 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of released SEW checkpoints to their hosted config files.
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCAmelCase ( PretrainedConfig ):
    """Configuration for a SEW (Squeezed and Efficient Wav2vec) model.

    The original block gave every `__init__` parameter the same name
    (`_snake_case`, a SyntaxError) and bound every assignment to a local
    instead of `self`; parameter names are restored from the defaults.
    """

    # NOTE(review): upstream expects this attribute to be named `model_type`
    # — confirm before renaming.
    A__ : int = 'sew'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv_* lists describe the same stack of feature-extractor
        # layers, so their lengths must agree.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _lowercase(self):
        """Total downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 683 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Declarative map of submodule -> public names, consumed by _LazyModule below.
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only; must mirror _import_structure.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# GLUE task name -> number of classification labels (sts-b is a regression task).
# The converter below references this name, so it must not be renamed.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model + config on disk.

    The head placed on top of the base model depends on `finetuning_task`:
    a GLUE task gets a sequence-classification head, a SQuAD task a
    question-answering head, anything else the plain LM head.
    (The original def reused one parameter name four times — a SyntaxError —
    and was named `_lowercase` while the __main__ guard calls this name.)
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 10 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names the tokenizer class and save_vocabulary() below look these up by.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable latin-1 bytes map to themselves; the remaining bytes are shifted
    up past 255 so no byte maps to whitespace/control characters, which the
    BPE merge files cannot represent. Cached: the table is a constant.
    (Renamed from `_lowercase`: the tokenizer class calls `bytes_to_unicode()`.)
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    (Renamed from `_lowercase`: the tokenizer's `bpe` method calls `get_pairs`.)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _convert_token_to_id(self, token):
    """Convert a token (str) to an id, falling back to the unknown token's id."""
    return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
    """Convert an id (int) back to its token string (None if unknown)."""
    return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
    """Join byte-level tokens and decode the underlying bytes back to UTF-8 text."""
    text = "".join(tokens)
    text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
    return text
def save_vocabulary(self, save_directory, filename_prefix=None):
    """Write the vocabulary JSON and the BPE merges file to ``save_directory``.

    Returns the ``(vocab_file, merge_file)`` paths, or nothing (with an
    error logged) if ``save_directory`` is not an existing directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )
    merge_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
    )

    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

    index = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write("#version: 0.2\n")
        # BUG FIX: the sort key was `lambda UpperCamelCase_: kv[1]`, which
        # references the undefined name `kv` and raises NameError at runtime.
        # Merges must be written in rank order (the dict value).
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                    " Please check that the tokenizer is not corrupted!"
                )
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n")
            index += 1

    return vocab_file, merge_file
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Build model inputs by adding special tokens.

    Single sequence: ``[CLS] A [SEP]``; pair: ``[CLS] A [SEP][SEP] B [SEP]``.
    FIX: the mangled original declared two parameters with the same name
    (a SyntaxError) and added ``None`` to a list in the single-sequence branch.
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    FIX: restores distinct parameter names (the mangled original repeated
    the same parameter name three times, a SyntaxError).
    """
    if already_has_special_tokens:
        # The ids already contain special tokens; let the base class detect them.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )

    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    """Return all-zero token type ids sized to the sequence(s) plus special tokens.

    FIX: restores distinct parameter names (duplicate parameter names in the
    mangled original were a SyntaxError).
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]

    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space so the byte-level BPE sees a word boundary.

    A leading space is added when the caller asked for it (``add_prefix_space``,
    popped from kwargs with ``self.add_prefix_space`` as default) or when the
    text is pre-split into words, unless the text already starts with whitespace.
    """
    add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
| 10 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
# FIX: every constant below was assigned to the same clobbered name
# `UpperCamelCase` while later code referenced `PATH_TO_TRANSFORMERS`,
# `transformers`, `CONFIG_MAPPING` and `SPECIAL_CASES_TO_ALLOW`; restore
# the names the rest of the file actually uses.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Config attributes that legitimately go unused in modeling files.
# A value of `True` allows every attribute of that config class.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of `source_strings`.

    FIX: the mangled original declared four parameters all named
    `SCREAMING_SNAKE_CASE` (a SyntaxError) and passed the undefined
    `_lowerCamelCase` to `re.search`; parameter names are restored from
    how the body uses them.

    Args:
        config_class: the configuration class whose attribute is checked
        attributes: all name variants of a single config attribute
        default_value: the attribute's default value in `config_class.__init__`
        source_strings: contents of the model's modeling files

    Returns:
        True if the attribute is used, or is explicitly allowed as unused.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class.__init__` parameters that appear
    unused in the model's modeling files.

    FIX: restores the function name (all three functions in this script were
    named `__magic_name__`, so only the last survived) and real local names.
    """
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check unused attributes for every configuration class and raise with a
    summary if any are found."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"""{name}: {attributes}\n"""
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 66 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast DeBERTa tokenizers.

    FIX: every assignment in the mangled original targeted the throwaway name
    `lowerCamelCase_` while later statements referenced the real names
    (`tokenizer`, `tokens`, `tokd`, `self.vocab_file`, ...); all locals,
    attributes and test-method names are restored accordingly. The mixin base
    (`__snake_case`) is the imported `TokenizerTesterMixin`.
    """

    # class attributes the TokenizerTesterMixin machinery reads
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 549 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# FIX: the three module-level assignments below all targeted the clobbered
# name `a` while the functions reference `logger` and `MAPPING`; restore the
# names the rest of the script uses.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> transformers parameter name
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# keys that live at the top level of the HF model (no prefix to strip)
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the parameter of `hf_pointer` addressed by dotted `key`.

    FIX: the mangled original declared five parameters with the same name
    (a SyntaxError) and replaced every `hf_pointer.<attr>.data = value`
    assignment with a dead local assignment, so no weight was ever copied.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy the fairseq wav2vec2 encoder state dict into the HF model.

    Conv feature-extractor and adapter weights are dispatched to dedicated
    loaders; everything else is routed through `MAPPING`/`set_recursively`.
    Unmatched parameter names are collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the layer index sits just before the matched key fragment
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a fairseq conv feature-extractor weight into the HF feature extractor.

    FIX: restores distinct parameter names and the lost
    `feature_extractor.conv_layers[...] = value` assignments (the mangled
    original assigned to a dead local instead).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Copy a fairseq adapter/projection weight into the HF adapter module.

    FIX: restores distinct parameter names, the lost `adapter...data = value`
    assignments, the `isinstance(layer_id, int)` dispatch, and the weight-branch
    log message that previously said "bias".
    """
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # adapter conv layers are addressed as "adaptor.<layer_id>.<param>"
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
                logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
                logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
            # BUG FIX: this message previously reported "bias" in the weight branch
            logger.info(f"""Adapter layer {layer_id} weight was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weights with `emb`
    (used to tie the LM head to the decoder embedding)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2 + mBART checkpoint into a HF
    SpeechEncoderDecoderModel and save it (with tokenizer and feature
    extractor) to `pytorch_dump_folder_path`.

    FIX: the mangled original repeated one parameter name eleven times and
    dropped every `config.<field> = ...` assignment; names are restored from
    the argparse call site below.
    """
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""")[:-1]),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)

    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = """mbart50"""
    config["feature_extractor_type"] = """wav2vec2"""
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    # NOTE(review): 250004 looks like the mBART-50 language id used as the
    # forced BOS token — confirm against the target checkpoint.
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # FIX: both the parser and the parsed args were assigned to the clobbered
    # name `a` while later lines referenced `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-xls-r-1b',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/mbart-large-50-one-to-many-mmt',
        type=str,
        help='Path to hf decoder checkpoint config',
    )
    parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
    parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
    parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
    parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
    parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 529 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class SCREAMING_SNAKE_CASE__ :
def __init__(self):
    # BUG FIX: the mangled original assigned the empty dict to a dead local
    # (`lowerCAmelCase = {}`), so `self.graph` was never created and every
    # other method failed with AttributeError.
    # adjacency map: vertex -> list of [weight, neighbour] pairs
    self.graph = {}
def __lowercase ( self : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any]=1 ):
if self.graph.get(lowerCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCAmelCase = [[w, v]]
if not self.graph.get(lowerCAmelCase ):
lowerCAmelCase = []
def __lowercase ( self : Optional[int] ):
return list(self.graph )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict ):
if self.graph.get(lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase )
def __lowercase ( self : List[str] , lowerCAmelCase : Tuple=-2 , lowerCAmelCase : List[Any]=-1 ):
if s == d:
return []
lowerCAmelCase = []
lowerCAmelCase = []
if s == -2:
lowerCAmelCase = list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase ) != 0:
lowerCAmelCase = stack[len(lowerCAmelCase ) - 1]
else:
lowerCAmelCase = ss
# check if se have reached the starting point
if len(lowerCAmelCase ) == 0:
return visited
def __lowercase ( self : Tuple , lowerCAmelCase : Any=-1 ):
if c == -1:
lowerCAmelCase = floor(random() * 1_0000 ) + 10
for i in range(lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase , lowerCAmelCase , 1 )
def __lowercase ( self : Optional[int] , lowerCAmelCase : List[str]=-2 ):
lowerCAmelCase = deque()
lowerCAmelCase = []
if s == -2:
lowerCAmelCase = list(self.graph )[0]
d.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
while d:
lowerCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowercase ( self : List[Any] , lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __lowercase ( self : int , lowerCAmelCase : Tuple ):
return len(self.graph[u] )
def __lowercase ( self : Optional[int] , lowerCAmelCase : Optional[Any]=-2 ):
lowerCAmelCase = []
lowerCAmelCase = []
if s == -2:
lowerCAmelCase = list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCAmelCase = s
lowerCAmelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase ) != 0:
lowerCAmelCase = stack[len(lowerCAmelCase ) - 1]
else:
lowerCAmelCase = ss
# check if se have reached the starting point
if len(lowerCAmelCase ) == 0:
return sorted_nodes
def __lowercase ( self : Any ):
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCAmelCase = -2
lowerCAmelCase = []
lowerCAmelCase = s
lowerCAmelCase = False
lowerCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase = len(lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase = True
if len(lowerCAmelCase ) != 0:
lowerCAmelCase = stack[len(lowerCAmelCase ) - 1]
else:
lowerCAmelCase = False
indirect_parents.append(lowerCAmelCase )
lowerCAmelCase = s
lowerCAmelCase = ss
# check if se have reached the starting point
if len(lowerCAmelCase ) == 0:
return list(lowerCAmelCase )
def __lowercase ( self : Tuple ):
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCAmelCase = -2
lowerCAmelCase = []
lowerCAmelCase = s
lowerCAmelCase = False
lowerCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCAmelCase = len(lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase = True
if len(lowerCAmelCase ) != 0:
lowerCAmelCase = stack[len(lowerCAmelCase ) - 1]
else:
lowerCAmelCase = False
indirect_parents.append(lowerCAmelCase )
lowerCAmelCase = s
lowerCAmelCase = ss
# check if se have reached the starting point
if len(lowerCAmelCase ) == 0:
return False
def __lowercase ( self : List[Any] , lowerCAmelCase : Any=-2 , lowerCAmelCase : Tuple=-1 ):
lowerCAmelCase = time()
self.dfs(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = time()
return end - begin
def __lowercase ( self : int , lowerCAmelCase : str=-2 ):
lowerCAmelCase = time()
self.bfs(lowerCAmelCase )
lowerCAmelCase = time()
return end - begin
class SCREAMING_SNAKE_CASE__:
    """Undirected graph stored as an adjacency dict: vertex -> list of [weight, neighbour].

    Restored from the obfuscated original (assignment targets and method names
    were collapsed to placeholders); every edge is mirrored in both directions.
    """

    def __init__(self):
        # adjacency list: vertex -> list of [weight, vertex] edges (both directions)
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge u <-> v with weight w; duplicates are ignored."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove edge u <-> v (both directions) if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from s (first vertex if -2); stop early when d is found."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s (first vertex if -2); returns visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return vertices on back-edges found by DFS.

        NOTE(review): guard condition inherited from upstream; it misses some
        cycles — confirm before relying on it.
        """
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # walk back up the stack collecting the cycle members
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back-edge is detected, else False (see cycle_nodes note)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex of the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 529 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the second assignment below previously clobbered it
# because both targets were obfuscated to the same placeholder name.
logger = logging.get_logger(__name__)

# Canonical checkpoint-name -> config URL map for Funnel Transformer.
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Funnel Transformer configuration (restored from obfuscated source).

    The original had all `__init__` parameters collapsed to one duplicate name
    (a SyntaxError), lost the `self.` attribute targets, and decorated its
    property setters with names (`num_hidden_layers`, `num_blocks`) that were
    never defined. The base class `a__` was undefined; `PretrainedConfig` is
    the import at the top of this module.
    """

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # one repeat per block unless explicitly configured
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        # derived: total encoder layers across all blocks
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
| 40 |
def UpperCamelCase(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of ``range(n)``.

    Uses the factorial number system: each factorial digit selects the next
    remaining element. Requires ``0 <= k < n!``.

    NOTE(review): the function name looks machine-mangled; no caller is
    visible here to confirm the original, so it is kept unchanged.
    """
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    """Convert an (H, W, 3) RGB image to grayscale using ITU-R 601-2 luma weights.

    Name restored from the call site in the `__main__` block below; the
    obfuscated source had three functions all named `A`.
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    """Threshold a grayscale image: True where 127 < value <= 255.

    Name restored from the call site in the `__main__` block below.
    """
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    """Morphological dilation of a binary image with a structuring element.

    Returns an int array of the same shape as *image* (1 where the kernel,
    centred on the pixel, overlaps any foreground pixel).

    NOTE(review): the padded-copy offset assumes each kernel dimension is
    >= 2 (e.g. a 3x3 cross); a 1-wide kernel produces an empty slice — confirm
    callers never pass one.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (the assignment target slice was lost in the
    # obfuscated source; restored to centre the image in the padded buffer)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image (variable names restored: the obfuscated source
    # assigned everything to the same placeholder, so `structuring_element`
    # and `pil_img` below were undefined)
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))

    # kernel to be applied: 3x3 cross
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
| 433 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below *max_number* (sieve of Eratosthenes).

    Name restored: the obfuscated source defined two functions both called
    `A`, so the second silently shadowed this one.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark every multiple of i starting from i*i as composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Project Euler 187: count composites below *max_number* with exactly two
    (not necessarily distinct) prime factors.

    Two-pointer scan over the primes below max_number // 2: for each left
    prime p, every right prime q with p <= q and p*q < max_number contributes
    one semiprime.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # shrink the right end until the product falls below the limit
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 433 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger; restored name — both assignments were obfuscated to `a`,
# so the archive map clobbered the logger.
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_(PretrainedConfig):
    """LeViT model configuration (restored from obfuscated source).

    The original collapsed every `__init__` parameter to one duplicate name
    (a SyntaxError), dropped the `self.` attribute targets, and inherited from
    the undefined `__SCREAMING_SNAKE_CASE`; `PretrainedConfig` is the import
    at the top of this module.
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # downsampling ("Subsample") operations between the three stages
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for LeViT (restored: property/attribute names
    were obfuscated and the base class was the undefined
    `__SCREAMING_SNAKE_CASE`; `OnnxConfig` is imported at module top).

    Trailing dataset residue ("| 687 |") fused onto the last line removed.
    """

    # minimum torch version known to export this architecture correctly
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: NCHW pixel values with symbolic axis names."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; restored name — both assignments were obfuscated to
# `__snake_case`, so the archive map clobbered the logger.
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class A__(PretrainedConfig):
    """XGLM model configuration (restored from obfuscated source).

    The original collapsed every `__init__` parameter to one duplicate name
    (a SyntaxError), dropped the `self.` attribute targets, inherited from the
    undefined `__SCREAMING_SNAKE_CASE`, and had dataset residue fused onto its
    last line; `PretrainedConfig` is imported at module top.
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
import socket
def main():
    """Connect to a local TCP server on port 12312 and save the received stream.

    Name restored: the `__main__` guard below calls `main()`, but the
    obfuscated source defined `lowerCAmelCase__` and wrote an undefined
    variable to the output file.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
| 172 |
'''simple docstring'''
class _A :
'''simple docstring'''
def __init__( self : List[Any] )-> List[str]:
snake_case__ : List[str] = """"""
snake_case__ : Dict = """"""
snake_case__ : Union[str, Any] = []
def __lowerCAmelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : int )-> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
snake_case__ : Any = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
snake_case__ : List[Any] = self.__min_dist_top_down_dp(lowerCamelCase , n - 1 )
snake_case__ : Any = self.__min_dist_top_down_dp(m - 1 , lowerCamelCase )
snake_case__ : Optional[int] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
snake_case__ : Dict = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self.dp[m][n]
def __lowerCAmelCase ( self : List[str] , lowerCamelCase : str , lowerCamelCase : str )-> int:
snake_case__ : Optional[int] = worda
snake_case__ : List[str] = worda
snake_case__ : List[str] = [[-1 for _ in range(len(lowerCamelCase ) )] for _ in range(len(lowerCamelCase ) )]
return self.__min_dist_top_down_dp(len(lowerCamelCase ) - 1 , len(lowerCamelCase ) - 1 )
def __lowerCAmelCase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str )-> int:
snake_case__ : List[str] = worda
snake_case__ : int = worda
snake_case__ : Any = len(lowerCamelCase )
snake_case__ : List[str] = len(lowerCamelCase )
snake_case__ : List[str] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
snake_case__ : Union[str, Any] = j
elif j == 0: # second string is empty
snake_case__ : List[str] = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
snake_case__ : Tuple = self.dp[i - 1][j - 1]
else:
snake_case__ : int = self.dp[i][j - 1]
snake_case__ : List[Any] = self.dp[i - 1][j]
snake_case__ : List[str] = self.dp[i - 1][j - 1]
snake_case__ : Tuple = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
    # Variable names restored: the obfuscated source assigned the solver and
    # both input strings to the same placeholder, then read `Sa` twice.
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 172 | 1 |
"""simple docstring"""
def lowerCAmelCase_(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of *arr* sums to *required_sum* (classic DP).

    The obfuscated source declared both parameters with the same name (a
    SyntaxError) and lost every local assignment target; the body reads
    `arr`, `arr_len` and `subset`, which fixes the intended names.
    """
    arr_len = len(arr)

    # subset[i][j]: can some subset of the first i elements sum to j?
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # current element too big: inherit the answer without it
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the element or take it
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 616 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: "Image.Image", level: int) -> "Image.Image":
    """Return a copy of *img* with its contrast adjusted by *level* (-255..255).

    Standard linear contrast formula: each channel value c becomes
    ``128 + factor * (c - 128)``. Name restored from the call in the
    `__main__` block below (the obfuscated source also declared both
    parameters with the same name — a SyntaxError).
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # per-channel linear transform around the midpoint 128
        return int(128 + factor * (c - 128))

    return img.point(contrast)


# Backward-compatible alias for the obfuscated function name.
lowerCAmelCase_ = change_contrast


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 616 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Sentences are concatenated with no delimiter before character-level scoring.
# Names restored: the obfuscated source assigned SENTENCE_DELIMITER, the
# transform class and both Compose objects to placeholder names while reading
# `SENTENCE_DELIMITER` and `SentencesToListOfCharacters` below.
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Fallback for old jiwer: explode sentences into characters, optionally
        separated by a delimiter."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # insert the delimiter between (not after) sentences
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
__lowercase : Any ="""\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__lowercase : Tuple ="""\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
__lowercase : str ="""
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """Character Error Rate metric backed by jiwer.

    Method and argument names restored per the `datasets.Metric` API: the
    obfuscated source collapsed the method names and declared `_compute` with
    three identically-named parameters (a SyntaxError), passing the same
    placeholder positionally everywhere.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # score the whole corpus as one string
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 550 |
import sys
import turtle
def get_mid(pa, pb):
    """Return the midpoint of two 2-D points given as (x, y) pairs."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa, vertexb, vertexc, depth):
    """Recursively draw a Sierpinski triangle using the global `my_pen` turtle.

    Args:
        vertexa, vertexb, vertexc: (x, y) corner points of the triangle.
        depth: remaining recursion depth; 0 draws only the outline.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Require exactly one CLI argument: the fractal recursion depth.
    if len(sys.argv) != 2:
        raise ValueError(
            """Correct format for using this script: """
            """python fractals.py <int:depth_for_fractal>"""
        )
my_pen = turtle.Turtle()  # global pen used by triangle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 550 | 1 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (%) of each letter in typical English text,
# taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
# Plain uppercase alphabet used for counting.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Return a mapping from each uppercase letter to its count in *message*.

    Counting is case-insensitive; non-letter characters are ignored.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def snake_case(a_: tuple) -> str:
    """Sort-key helper: return the first element of *a_*."""
    first_element = a_[0]
    return first_element
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in *message*.

    Letters tied on frequency are ordered by reverse ETAOIN rank within their
    bucket (the classic frequency-analysis tie-break), and buckets are emitted
    from the highest frequency down.
    """
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Within one frequency bucket, order letters by reverse ETAOIN rank.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    # Highest frequency first; the sort key is the frequency itself.
    freq_pairs.sort(key=lambda pair: pair[0], reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score 0-12: how many of *message*'s six most/least frequent letters
    match English's most (ETAOIN) and least (VKJXQZ) frequent letters.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 208 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 linear system via Cramer's rule.

    Each equation is given as [a, b, c], meaning a*x + b*y = c.

    Returns:
        (x, y) solving the system; (0.0, 0.0) for the trivial solution.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients,
            if both a and b are zero in both equations, or if the system is
            inconsistent / has infinitely many solutions.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        raise ValueError("""No solution. (Inconsistent system)""")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
| 208 | 1 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved PyTorch state dict, cast every tensor to fp16, and save it.

    Args:
        src_path: path to a saved state dict (e.g. pytorch_model.bin).
        map_location: device to map the tensors onto while loading.
        save_path: destination file; defaults to overwriting *src_path*.

    Raises:
        TypeError: if the file contains anything other than a plain tensor state dict.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    # Expose `convert` as a CLI via python-fire.
    fire.Fire(convert)
| 710 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Project Euler 16: return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        # Peel off the last digit and drop it from n.
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    # Read the exponent from stdin and print the digit sum of 2**power.
    print(solution(int(str(input()).strip())))
| 695 | 0 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Project Euler 86: smallest M such that the number of M x M x M-bounded
    cuboids with an integer shortest surface path first exceeds *limit*.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            # Unfolded, the shortest path is sqrt((a + b)^2 + c^2); count the
            # (a, b) splits of sum_shortest_sides that give an integer length.
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # Print the answer for the default limit of one million.
    print(f'''{solution() = }''')
| 102 | from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for constraints applied during generation.

    Subclasses implement advance/does_advance/update/reset/remaining/copy;
    ``__init__`` immediately sanity-checks the subclass via :meth:`test`.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Drive the constraint to completion once to verify it is coherent."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )

    @abstractmethod
    def advance(self):
        """Return the token(s) that would advance this constraint."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether *token_id* makes progress on this constraint."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )

    @abstractmethod
    def update(self, token_id: int):
        """Consume *token_id*; return (stepped, completed, reset)."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )

    @abstractmethod
    def reset(self):
        """Forget all progress made so far."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )

    @abstractmethod
    def remaining(self):
        """Return how many steps are left before the constraint is fulfilled."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy of this constraint, optionally carrying current progress."""
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class PhrasalConstraint(Constraint):
    """Constraint that forces a fixed token sequence to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Return the next expected token id, or None when already complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        """Return True iff *token_id* is the next token of the phrase."""
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        """Consume *token_id*; return (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        """Forget all progress on the phrase."""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        """Number of phrase tokens still to be produced."""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """Return a fresh constraint; copy the progress too when *stateful*."""
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences.

    Used by DisjunctiveConstraint to track which continuations are still valid.
    """

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        # Longest candidate determines the maximum constraint length.
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F" {nested_token_ids}." )
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the token ids that can follow *current_seq* in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """Return True when *current_seq* ends at a leaf (a full candidate)."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count the leaves below *root*."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True if one candidate is a strict prefix of another (fewer leaves than candidates)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint satisfied by producing ANY ONE of several token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of tokens that keep some candidate alive, or None."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        """Return True iff *token_id* continues at least one candidate."""
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}" )
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        """Consume *token_id*; return (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        """Forget all progress made so far."""
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """Return a fresh constraint; copy the progress too when *stateful*."""
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks the progress of a beam through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the initial state: nothing complete, everything pending."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score how far along this state is (used to bank beams by progress)."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the tokens that would advance the in-progress or pending constraints."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Reinitialize, then replay *token_ids* (if any) through the constraints."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        """Feed one token; return (complete, stepped) for the constraint it touched."""
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        """Return a copy of this state; progress is carried over when *stateful*."""
        # We never mutate self.constraints themselves, so the new state starts
        # from the same (initialization-state) constraint objects.
        new_state = ConstraintListState(self.constraints)
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 166 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if assigning *color* to a vertex conflicts with no colored neighbour.

    *neighbours* is the vertex's adjacency row (1 = edge present).
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from *index* onward.

    Mutates *colored_vertices* in place; returns True on success.
    """
    # Base case: every vertex has been colored.
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring (list of color indices) of *graph* using at most
    *max_colors* colors, or an empty list if no such coloring exists.
    """
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 375 |
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT INT8 settings for onnxruntime.
# NOTE(review): the three env-var names were destroyed by anonymization; they are
# restored from the upstream quantization benchmark script — confirm them.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

# Disable graph optimizations so TensorRT sees the raw graph.
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# BERT-style dummy inputs: batch of 1, sequence length 128.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

# Warm up so engine build / caching does not pollute the timing below.
print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 375 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the pipeline test outputs are reproducible.
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for the Kandinsky 2.2 controlnet pipeline."""

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # First UNet block width; tied to the time embedding size.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny deterministic UNet matching the controlnet pipeline's layout."""
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """Tiny deterministic VQ decoder."""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the pipeline components (UNet, scheduler, MoVQ)."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic embeddings, hint image and generator for one call."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_controlnet(self):
        """End-to-end smoke test: output shape and a pinned pixel slice."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against reference outputs hosted on the Hub."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # PIL image -> float tensor in [0, 1], NCHW.
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 589 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    # Slow (sentencepiece) tokenizer is unavailable without the dependency.
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# Marker sentencepiece uses for a leading space inside a token.
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Fast CamemBERT tokenizer (backed by HuggingFace *tokenizers*), RoBERTa-style.

    NOTE(review): restored from the upstream transformers implementation after
    name mangling; class and attribute names follow that file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word: keep the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocab requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        """Return all-zero token type ids (CamemBERT does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 589 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
# Make the examples' shared `lightning_base` module importable when this
# script is run from inside the seq2seq example directory.
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
# Module-level logger (NOTE(review): automated renaming turned the original
# `logger` binding into `__lowercase`; later code still refers to `logger`).
__lowercase = logging.getLogger(__name__)
class _lowercase ( __lowerCamelCase ):
    """PyTorch-Lightning module that fine-tunes a seq2seq model for summarization.

    NOTE(review): this class was machine-renamed; it is presumably the
    `SummarizationModule` that `main()` below instantiates, and most local
    assignment targets were collapsed into the placeholder `A_`, so many
    statements no longer bind the attributes their later readers expect
    (e.g. `self.metrics_save_path`, `self.step_count`, `self.dataset_kwargs`).
    The code is left byte-identical; only documentation is added.
    """
    # NOTE(review): these four class attributes shadow each other (same name);
    # originally they were presumably mode / loss_names / metric_names /
    # default_val_metric, which instance code below reads.
    _lowercase : Optional[Any] = 'summarization'
    _lowercase : List[str] = ['loss']
    _lowercase : Union[str, Any] = ROUGE_KEYS
    _lowercase : List[str] = 'rouge2'
    # NOTE(review): positional and **kwargs parameters share one name here
    # (a SyntaxError); the body reads `hparams`, the presumed original name.
    def __init__( self : Union[str, Any] , lowerCamelCase__ : str , **lowerCamelCase__ : int ) -> List[str]:
        """Configure datasets, metric bookkeeping and freezing from `hparams`."""
        if hparams.sortish_sampler and hparams.gpus > 1:
            # NOTE(review): presumably `hparams.sortish_sampler = False` -- confirm.
            A_ = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(lowerCamelCase__ , num_labels=lowerCamelCase__ , mode=self.mode , **lowerCamelCase__ )
        use_task_specific_params(self.model , '''summarization''' )
        save_git_info(self.hparams.output_dir )
        # Output artefact paths (metrics JSON + pickled hparams).
        A_ = Path(self.output_dir ) / '''metrics.json'''
        A_ = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        A_ = 0
        A_ = defaultdict(lowerCamelCase__ )
        A_ = self.config.model_type
        # FSMT keeps a separate target vocabulary size.
        A_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        A_ = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Per-split caps on the number of examples (-1 means "use all").
        A_ = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        A_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        # Per-split maximum target (summary) lengths.
        A_ = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        A_ = get_git_info()['''repo_sha''']
        A_ = hparams.num_workers
        A_ = None # default to config
        # Multilingual models (e.g. MBart) need an explicit decoder start token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase__ ):
            A_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            A_ = self.decoder_start_token_id
        A_ = (
            SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        A_ = False
        A_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            A_ = self.hparams.eval_max_gen_length
        else:
            A_ = self.model.config.max_length
        A_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
        """Dump one decoded batch (text + token ids) to JSON files for debugging."""
        A_ = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(lowerCamelCase__ , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
        A_ = True
        return readable_batch
    # NOTE(review): duplicate parameter names again (SyntaxError in the dump).
    def UpperCamelCase ( self : Tuple , lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ) -> str:
        """Forward pass delegated to the wrapped transformer model."""
        return self.model(lowerCamelCase__ , **lowerCamelCase__ )
    def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : List[int] ) -> Any:
        """Decode generated ids to stripped text, skipping special tokens."""
        A_ = self.tokenizer.batch_decode(
            lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
        return lmap(str.strip , lowerCamelCase__ )
    def UpperCamelCase ( self : Dict , lowerCamelCase__ : dict ) -> Tuple:
        """Compute the (optionally label-smoothed) LM loss for one batch."""
        A_ = self.tokenizer.pad_token_id
        A_ ,A_ = batch['''input_ids'''], batch['''attention_mask''']
        A_ = batch['''labels''']
        # T5-style models shift labels internally; BART-style use the helper.
        if isinstance(self.model , lowerCamelCase__ ):
            A_ = self.model._shift_right(lowerCamelCase__ )
        else:
            A_ = shift_tokens_right(lowerCamelCase__ , lowerCamelCase__ )
        if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
            A_ = decoder_input_ids
            self.save_readable_batch(lowerCamelCase__ )
        A_ = self(lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , use_cache=lowerCamelCase__ )
        A_ = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            A_ = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ )
            assert lm_logits.shape[-1] == self.vocab_size
            A_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            A_ = nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
            A_ ,A_ = label_smoothed_nll_loss(
                lowerCamelCase__ , lowerCamelCase__ , self.hparams.label_smoothing , ignore_index=lowerCamelCase__ )
        return (loss,)
    @property
    def UpperCamelCase ( self : Union[str, Any] ) -> int:
        """Pad token id shortcut used by the token-count logging below."""
        return self.tokenizer.pad_token_id
    def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ) -> Dict:
        """Lightning training_step: loss plus per-batch token statistics."""
        A_ = self._step(lowerCamelCase__ )
        A_ = dict(zip(self.loss_names , lowerCamelCase__ ) )
        # tokens per batch
        A_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        A_ = batch['''input_ids'''].shape[0]
        A_ = batch['''input_ids'''].eq(self.pad ).sum()
        A_ = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def UpperCamelCase ( self : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] ) -> Dict:
        """Lightning validation_step: generation + loss via _generative_step."""
        return self._generative_step(lowerCamelCase__ )
    def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any]="val" ) -> Dict:
        """Aggregate step outputs into epoch-level losses and generation metrics."""
        self.step_count += 1
        A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        A_ = losses['''loss''']
        A_ = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        A_ = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        A_ = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(lowerCamelCase__ )
        A_ = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
        A_ = self.step_count
        self.metrics[prefix].append(lowerCamelCase__ ) # callback writes this to self.metrics_save_path
        A_ = flatten_list([x['''preds'''] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F"{prefix}_loss": loss,
            F"{prefix}_{self.val_metric}": metric_tensor,
        }
    def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) -> Dict:
        """Score generations against references with ROUGE."""
        return calculate_rouge(lowerCamelCase__ , lowerCamelCase__ )
    def UpperCamelCase ( self : List[str] , lowerCamelCase__ : dict ) -> dict:
        """Generate for one batch and compute loss + generation metrics."""
        A_ = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        A_ = self.model.generate(
            batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=lowerCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        A_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
        A_ = self.ids_to_clean_text(lowerCamelCase__ )
        A_ = self.ids_to_clean_text(batch['''labels'''] )
        A_ = self._step(lowerCamelCase__ )
        A_ = dict(zip(self.loss_names , lowerCamelCase__ ) )
        A_ = self.calc_generative_metrics(lowerCamelCase__ , lowerCamelCase__ )
        A_ = np.mean(lmap(lowerCamelCase__ , lowerCamelCase__ ) )
        base_metrics.update(gen_time=lowerCamelCase__ , gen_len=lowerCamelCase__ , preds=lowerCamelCase__ , target=lowerCamelCase__ , **lowerCamelCase__ )
        return base_metrics
    def UpperCamelCase ( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict ) -> Dict:
        """Lightning test_step: same work as validation."""
        return self._generative_step(lowerCamelCase__ )
    def UpperCamelCase ( self : int , lowerCamelCase__ : Any ) -> Optional[int]:
        """Lightning test_epoch_end: reuse validation aggregation with 'test' prefix."""
        return self.validation_epoch_end(lowerCamelCase__ , prefix='''test''' )
    def UpperCamelCase ( self : List[str] , lowerCamelCase__ : Dict ) -> SeqaSeqDataset:
        """Build the dataset for one split ('train'/'val'/'test')."""
        A_ = self.n_obs[type_path]
        A_ = self.target_lens[type_path]
        A_ = self.dataset_class(
            self.tokenizer , type_path=lowerCamelCase__ , n_obs=lowerCamelCase__ , max_target_length=lowerCamelCase__ , **self.dataset_kwargs , )
        return dataset
    # NOTE(review): duplicate parameter names again (SyntaxError in the dump).
    def UpperCamelCase ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : bool = False ) -> DataLoader:
        """Build a DataLoader, honoring the sortish / dynamic-batch samplers for train."""
        A_ = self.get_dataset(lowerCamelCase__ )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            A_ = dataset.make_sortish_sampler(lowerCamelCase__ , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            A_ = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
    def UpperCamelCase ( self : str ) -> DataLoader:
        """Training DataLoader (shuffled)."""
        A_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase__ )
        return dataloader
    def UpperCamelCase ( self : List[str] ) -> DataLoader:
        """Validation DataLoader."""
        return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
    def UpperCamelCase ( self : List[Any] ) -> DataLoader:
        """Test DataLoader."""
        return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def UpperCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> Tuple:
        """Register this module's CLI arguments on `parser` and return it."""
        BaseTransformer.add_model_specific_args(lowerCamelCase__ , lowerCamelCase__ )
        add_generic_args(lowerCamelCase__ , lowerCamelCase__ )
        parser.add_argument(
            '''--max_source_length''' , default=1_0_2_4 , type=lowerCamelCase__ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--max_target_length''' , default=5_6 , type=lowerCamelCase__ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=1_4_2 , type=lowerCamelCase__ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=1_4_2 , type=lowerCamelCase__ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
        parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
        parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=lowerCamelCase__ )
        parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=lowerCamelCase__ )
        parser.add_argument('''--max_tokens_per_batch''' , type=lowerCamelCase__ , default=lowerCamelCase__ )
        parser.add_argument('''--logger_name''' , type=lowerCamelCase__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
        parser.add_argument('''--n_train''' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_val''' , type=lowerCamelCase__ , default=5_0_0 , required=lowerCamelCase__ , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_test''' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=lowerCamelCase__ , default='''summarization''' , required=lowerCamelCase__ , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--label_smoothing''' , type=lowerCamelCase__ , default=0.0 , required=lowerCamelCase__ )
        parser.add_argument('''--src_lang''' , type=lowerCamelCase__ , default='''''' , required=lowerCamelCase__ )
        parser.add_argument('''--tgt_lang''' , type=lowerCamelCase__ , default='''''' , required=lowerCamelCase__ )
        parser.add_argument('''--eval_beams''' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ )
        parser.add_argument(
            '''--val_metric''' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , choices=['''bleu''', '''rouge2''', '''loss''', None] )
        parser.add_argument('''--eval_max_gen_length''' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='''never generate more than n tokens''' )
        parser.add_argument('''--save_top_k''' , type=lowerCamelCase__ , default=1 , required=lowerCamelCase__ , help='''How many checkpoints to save''' )
        parser.add_argument(
            '''--early_stopping_patience''' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will effect it.'''
            ) , )
        return parser
class TranslationModule ( __lowerCamelCase ):
    """Fine-tuning module for translation; evaluates generations with BLEU.

    Restores the attribute names the summarization base class reads
    (mode / loss_names / metric_names / default_val_metric), the
    ``calc_generative_metrics`` override that ``_generative_step`` calls,
    and the class name used by ``main()`` below. The original had duplicate
    parameter names in ``__init__`` (a SyntaxError) and lost the
    ``self.dataset_kwargs[...]`` assignment targets.
    """

    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'

    def __init__(self, hparams, **kwargs):
        """Initialize like the summarization module, then record language pair."""
        super().__init__(hparams, **kwargs)
        # The seq2seq datasets need the source/target languages for tokenization.
        self.dataset_kwargs['src_lang'] = hparams.src_lang
        self.dataset_kwargs['tgt_lang'] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generations against references with BLEU instead of ROUGE."""
        return calculate_bleu(preds, target)
def main(args, model=None):
    """Train (and optionally test) a seq2seq module from parsed CLI args.

    Restores the function name used by the ``__main__`` guard below and the
    distinct parameter names (the original duplicated one name, a
    SyntaxError). Assignment targets lost to obfuscation are reconstructed
    from the names the remaining code reads.

    Returns the trained Lightning module.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('''/tmp''')
        or str(args.output_dir).startswith('''/var''')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('''WANDB_PROJECT''', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''')
    if not args.do_predict:
        return model
    # Resume testing from the newest checkpoint, if any were written.
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt'''), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # NOTE(review): all four bindings below were renamed to the same
    # placeholder `__lowercase`, so the `parser`/`args` names that the calls
    # on the following lines read are never actually bound -- confirm
    # against the original script (parser -> add_argparse_args -> parse_args).
    __lowercase = argparse.ArgumentParser()
    __lowercase = pl.Trainer.add_argparse_args(parser)
    __lowercase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    __lowercase = parser.parse_args()
    main(args)
| 563 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__lowercase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
__lowercase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
__lowercase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    """Return the fraction of positions where `preds` equals `labels`.

    Both arguments are array-likes supporting elementwise `==` and `.mean()`
    (e.g. numpy arrays). The name is restored to the one the callers in this
    file use (`acc_and_fa` and the metric class's `_compute`).
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return a dict with accuracy and F1 for binary classification tasks.

    Name restored to the one `_compute` calls for the mrpc/qqp configs; the
    original duplicated a single parameter name (a SyntaxError).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations between `preds` and `labels`.

    Used by the stsb regression config; name restored to the one `_compute`
    calls. The original duplicated a single parameter name (a SyntaxError).
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """GLUE evaluation metric: dispatches to accuracy / F1 / correlation
    scorers depending on the configuration (GLUE subset) name."""
    def UpperCamelCase ( self : Dict ) -> Tuple:
        """Validate the config name and describe the metric's input features."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
        # stsb is a regression task, so its inputs are floats; all other
        # subsets use integer class labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict ) -> Optional[int]:
        """Compute the subset-specific metric(s) for predictions vs references."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(lowerCamelCase__ , lowerCamelCase__ )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(lowerCamelCase__ , lowerCamelCase__ )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(lowerCamelCase__ , lowerCamelCase__ )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 563 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (IPEX).
# NOTE(review): every binding was renamed to the same placeholder
# `UpperCamelCase`, so names read later (`parser`, `args`, `device`, `pipe`,
# `prompt`, `model_id`, `sample`, `timestep`, `encoder_hidden_status`,
# `input_example`, `seed`, `generator`, `generate_kwargs`, `image`) are
# never actually bound -- confirm against the original script.
UpperCamelCase : List[str] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
UpperCamelCase : Any = parser.parse_args()
UpperCamelCase : Tuple = """cpu"""
UpperCamelCase : int = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
UpperCamelCase : List[Any] = """path-to-your-trained-model"""
UpperCamelCase : Dict = StableDiffusionPipeline.from_pretrained(model_id)
# Optionally swap in the faster DPM-Solver multistep scheduler.
if args.dpm:
    UpperCamelCase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCamelCase : Dict = pipe.to(device)
# to channels last
UpperCamelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
UpperCamelCase : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last)
UpperCamelCase : str = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    UpperCamelCase : str = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
# Dummy tensors give ipex.optimize a sample input to trace against; fall
# back to optimizing without one if that path fails.
UpperCamelCase : str = torch.randn(2, 4, 64, 64)
UpperCamelCase : Union[str, Any] = torch.rand(1) * 999
UpperCamelCase : Any = torch.randn(2, 77, 768)
UpperCamelCase : Tuple = (sample, timestep, encoder_hidden_status)
try:
    UpperCamelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    UpperCamelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
UpperCamelCase : str = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
UpperCamelCase : Tuple = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    UpperCamelCase : Any = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
# Fixed seed for reproducible generations.
UpperCamelCase : Optional[int] = 666
UpperCamelCase : Any = torch.Generator(device).manual_seed(seed)
UpperCamelCase : int = {"""generator""": generator}
if args.steps is not None:
    UpperCamelCase : Tuple = args.steps
# bfloat16 autocast keeps the pipeline within IPEX's optimized dtype.
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    UpperCamelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 37 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` near ``starting_point`` via Newton-Raphson.

    Restores distinct parameter names (the original duplicated one name five
    times, a SyntaxError) and the name used by the demo calls below.

    Args:
        function: sympy-parsable expression as a string, e.g. ``"sin(x)"``.
        starting_point: initial guess (may be complex).
        variable: name of the symbol appearing in ``function``.
        precision: stop when consecutive guesses differ by less than this.
        multiplicity: root multiplicity, used to accelerate convergence.

    Raises:
        ZeroDivisionError: when the derivative vanishes at a guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Demo calls exercising trigonometric, polynomial, logarithmic and
    # exponential roots (these call `newton_raphson`, the restored name of
    # the function above).
    # Find root of trigonometric function
    # Find value of pi
    print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
    # Find root of polynomial
    # Find fourth Root of 5
    print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
    # Find value of e
    print(
        '''The root of log(y) - 1 = 0 is ''',
        F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
    )
    # Exponential Roots
    print(
        '''The root of exp(x) - 1 = 0 is''',
        F'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
    )
    # Find root of cos(x)
    print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 549 | 0 |
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with odd-even (brick) transposition sort; return it.

    Fixes the swap, which the obfuscation had degenerated into a dead tuple
    assignment (so the list was returned unsorted), restores the name the
    demo below calls, and corrects the parameter annotation (the input is a
    list, not an int).
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even- and odd-indexed adjacent pairs.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 710 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the partition value corresponds to a power of two.

    Checks whether ``log2(sqrt(4*n + 1)/2 + 1/2)`` is an integer. Fixes the
    obfuscated ``math.loga`` (no such function; ``math.log2`` matches the
    base-2 integrality test performed on the next line) and restores the
    name the sibling ``solution()`` calls.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """Search increasing integers until the proportion of perfect partitions
    drops below ``max_proportion``; return the last candidate partition.

    Restores the name the ``__main__`` guard calls (the two functions in
    this snippet previously shared the name ``A_``, so the first shadowed
    the second and both call sites were broken).
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                # NOTE(review): the obfuscated return was `int(_lowerCAmelCase)`;
                # the candidate is the value tracked above -- confirm upstream.
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f'''{solution() = }''')
# Sample adjacency lists for exercising the SCC routines below. The
# original bound both to the same name (`snake_case`), so the second
# assignment destroyed the first; distinct names are restored.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited):
    """DFS from `vert`, returning vertices in post-order (finish order).

    Mutates `visited` in place. Restores distinct parameter names (the
    original duplicated one name three times, a SyntaxError) and the lost
    assignment targets `visited[vert]` and `order`, which the body and the
    recursive call (by this function's real name) require.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    # Append after all descendants: post-order / reverse topological position.
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited):
    """Collect all vertices reachable from `vert` in the reversed graph.

    Mutates `visited` in place; the returned list is one strongly connected
    component in Kosaraju's algorithm. Restores distinct parameter names and
    the lost `visited[vert]` / `component` assignment targets.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph):
    """Return the strongly connected components of `graph` (Kosaraju).

    `graph` maps each vertex in ``range(len(graph))`` to its out-neighbours.
    Pass 1 computes DFS finish order on `graph`; pass 2 runs DFS on the
    reversed graph in decreasing finish order, each tree being one SCC.
    Restores the assignment targets and sibling-function names lost to
    obfuscation.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    # Build the transpose graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    # Process vertices in reverse finish order on the transpose graph.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 62 |
'''simple docstring'''


def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    Restores distinct parameter names (the original duplicated one name, a
    SyntaxError) and distinct loop variables (the zip targets had collapsed
    into a single name, making the comparison always False).

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config map. The original bound both to the
# same name `_A` (the dict assignment destroying the logger) with swapped
# annotations (Dict for the logger, str for the dict); conventional names
# and no annotations are restored.
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase ( UpperCAmelCase_ ):
    """Configuration class for a ViViT-style video transformer.

    Restores distinct ``__init__`` parameter names (the original duplicated
    one name fifteen times, a SyntaxError), matched to the defaults in the
    order they appeared, and restores the ``self.`` prefix on the attribute
    assignments (the obfuscation had turned them into dead locals).
    """

    # NOTE(review): originally presumably named `model_type`; the
    # placeholder name is kept to avoid guessing at the base-class contract.
    _A : str = """vivit"""

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # NOTE: mutable default kept for signature compatibility
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.0_2,
        layer_norm_eps=1E-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 710 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds tiny DeBERTa-v2 configs/inputs and exercises each model head.

    Instantiated by the unittest class below; every ``create_and_check_*``
    method receives the tuple produced by ``prepare_config_and_inputs``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # Loss must be a scalar (empty size).
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Forward with progressively fewer optional inputs.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand every input to (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-tester suite for small randomly-initialised DeBERTa-v2 models."""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): flag names recovered from the standard ModelTesterMixin
    # contract (one True followed by four False in the original) — confirm.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow regression checks against the released deberta-v2-xlarge weights."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), f'{output[:, 1:4, 1:4]}')
| 617 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds tiny RoFormer configs and random inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite over every RoFormer head."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow regression check against the released roformer_chinese_base weights."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 27 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Config sweeps plus full-denoising-loop regression values for DPMSolverSDEScheduler."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        # Baseline config; keyword overrides let each test vary a single knob.
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Per-device reference values (the SDE sampler is device-dependent).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
| 663 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure: heavy submodules are imported on first attribute
# access at runtime, and eagerly only under TYPE_CHECKING for static analysers.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Install the lazy proxy in place of this module so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Bundles a BridgeTower image processor and a Roberta tokenizer into one processor."""

    # ProcessorMixin contract: `attributes` names the sub-processors, and the
    # *_class attributes tell it which classes to instantiate/save.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images`` into a single BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer untouched.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer untouched.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Deduplicate while preserving order.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 390 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Plans trajectories for an RL environment by denoising with a UNet while
    nudging each step along the gradient of a learned value function, then
    returns the first action of the highest-value trajectory.
    """

    def __init__(self, value_function: UNetaDModel, unet: UNetaDModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalisation statistics; keys whose values lack mean()/std()
        # (non-numeric entries) are silently skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        # Recursively move dicts/tensors/arrays onto the UNet's device.
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # Re-impose the conditioned states: for each conditioned timestep `key`,
        # overwrite the state portion of the trajectory (columns after act_dim).
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # Don't guide the last couple of (nearly noise-free) steps.
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_a , )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = FalconModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a )
lowerCamelCase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = FalconModel(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
lowerCamelCase = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = FalconForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) = config_and_inputs
lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard + Falcon-specific model tests (classification heads, cache format/conversion)."""

    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        # Exercise both the rotary (alibi=False) and alibi position-embedding paths.
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_cache_conversions(self):
        # Round-trip the cache through Falcon's legacy "RW" layout and back,
        # and check shapes/values survive the conversion.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(standard_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests running real (or tiny random) Falcon checkpoints."""

    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 543 | 0 |
'''simple docstring'''
def A__ ( A_ ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def A__ ( A_ ) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph (adjacency-list dict) via a DFS low-link sweep.

    A bridge is an edge whose removal disconnects the graph. Each bridge is
    reported once as a (small, large) vertex pair.
    """
    id_ = 0
    n = len(A_)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in A_[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                # tree edge (at, to) is a bridge iff no back edge from to's
                # subtree reaches at or above
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 602 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table: maps submodule name -> public names it exports.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy module below is used.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 602 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import table: maps submodule name -> public names it exports.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer loads on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 556 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
a : Optional[int] = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """TrainingArguments extended with seq2seq-specific knobs.

    The layerdrop/dropout fields are copied into `model.config` by the
    training script; `None` means "keep the model's configured value".
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 556 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the ESM configuration code.
lowerCamelCase_ : Dict = logging.get_logger(__name__)

# TODO Update this
# Map of pretrained checkpoint name -> hosted config URL.
lowerCamelCase_ : Optional[int] = {
    """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models (and, when `is_folding_model`, ESMFold).

    Mirrors the usual BERT-style hyperparameters; folding models additionally
    carry an `esmfold_config` and a `vocab_list`.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested esmfold_config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Top-level ESMFold settings; `trunk` is normalized to a TrunkConfig in __post_init__."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a TrunkConfig instance, a plain dict, or None.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Folding-trunk hyperparameters; dimension/head-width consistency is validated in __post_init__."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Accept either a StructureModuleConfig instance, a plain dict, or None.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        # State dims must factor exactly into (num_heads * head_width).
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyperparameters for the ESMFold structure module (IPA + angle/backbone heads)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize every field to a plain dict."""
        return asdict(self)
def lowerCAmelCase( ):
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Layout: four leading special tokens, the 25 residue characters, then
    '.', '-', '<null_1>' and '<mask>'.
    """
    leading_specials = ("<cls>", "<pad>", "<eos>", "<unk>")
    residues = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_specials = (".", "-", "<null_1>", "<mask>")
    return leading_specials + residues + trailing_specials
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    """Task template describing extractive question answering over (question, context) pairs."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map this template's configured column names onto the canonical schema names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for constraints applied during constrained generation.

    Subclasses must implement advance/does_advance/update/reset/remaining/copy;
    the constructor immediately runs a self-consistency check over them.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Drive the constraint to completion once to verify it is well-defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.'
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception('update() does not fulfill the constraint.')

        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')

    @abstractmethod
    def advance(self):
        """Return the token(s) that would advance this constraint."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether `token_id` would make progress on this constraint."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume `token_id`; return (stepped, completed, reset)."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )

    @abstractmethod
    def reset(self):
        """Discard all progress on this constraint."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )

    @abstractmethod
    def remaining(self):
        """Return how many more steps are needed to complete this constraint."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; when `stateful`, the copy keeps the current progress."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class PhrasalConstraint(Constraint):
    """Constraint forcing an exact token sequence to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences, used by DisjunctiveConstraint."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        # Reject inputs where one sequence is a strict prefix of another: the
        # leaf count would then differ from the number of sequences.
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                f' {nested_token_ids}.'
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """Return the tokens that can legally follow the prefix `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """Return whether `current_seq` completes one of the stored sequences."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count the leaves below `root`."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True when some sequence is a prefix of another (leaf count != sequence count)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled when any one of several token sequences appears."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.'
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class lowercase__:
    """Tracks a beam's progress through a list of ``Constraint`` objects
    during constrained generation.

    NOTE(review): reconstructed from a mangled original in which every
    attribute assignment went to one clobbered local and all methods shared
    one name. Restored method names are grounded by this class's own call
    sites (``self.init_state()``, ``self.add(...)``) and the constraint API
    it invokes (``advance``, ``update``, ``copy``, ``remaining``).
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the initial state: nothing completed or in progress."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score this state: full credit per completed constraint, partial
        credit for a constraint that is mid-fulfilled."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the token ids that would advance some constraint, or
        ``None`` when there is nothing left to advance."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                adv = constraint.advance()
                if isinstance(adv, int):
                    token_list.append(adv)
                elif isinstance(adv, list):
                    token_list.extend(adv)
        else:
            adv = self.inprogress_constraint.advance()
            if isinstance(adv, int):
                token_list.append(adv)
            elif isinstance(adv, list):
                token_list.extend(adv)
        if len(token_list) == 0:
            return None
        return token_list

    def reset(self, token_ids):
        """Re-derive the whole state by replaying ``token_ids`` from scratch."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint per token
                self.add(token)
                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token into the state.

        Returns:
            (complete, stepped) for the constraint the token acted on.
        """
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint: simply update it.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. The token broke progress (e.g. constraint "I love pies",
                # sequence so far "I love", token "books"). Only this
                # constraint restarts -- never the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. The token completed the constraint: move it to the
                # completed list; if nothing is pending, the whole list is done.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not mid-constraint: does this token step any pending constraint?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # Any progress at all means it is no longer "pending".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # Nothing pending and nothing in progress: complete.
                            self.completed = True
                        break  # prevent stepping multiple constraints with one token
        return complete, stepped

    def copy(self, stateful=True):
        """Clone this state; ``stateful=False`` clones only the constraint
        list (the fresh instance starts at initialization state, since the
        ``self.constraints`` objects themselves are never mutated here)."""
        new_state = type(self)(self.constraints)
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 97 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase(unittest.TestCase):
    """Integration test for the DiT checkpoint fine-tuned on RVL-CDIP.

    NOTE(review): locals were clobbered into one name in the original and the
    method name lacked the ``test`` prefix, so unittest never ran it.
    """

    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        # local import: `datasets` is only needed for this slow test
        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 165 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Fix(review): the search functions below read `precision`; the original
# assigned the value to a name that was never referenced anywhere.
precision = 10
def lin_search(left: int, right: int, array: list, target) -> int:
    """Linear search over ``array[left:right]`` (``right`` exclusive).

    Fix(review): the original declared four parameters with the same name,
    which is a SyntaxError in Python; names restored from the call sites.

    Returns the index of ``target`` or -1 when absent from the half-open range.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list, target, precision: int = 10) -> int:
    """Iterative ternary search on a sorted ``array``.

    Fixes(review):
      * the midpoint formula ``(left + right) // 3 + 1`` does not lie inside
        ``[left, right]`` once ``left > 0`` (it indexed out of range for
        larger inputs); replaced with ``left/right -/+ (right-left)//3``.
      * ``right`` was initialised to ``len(array)`` but treated as inclusive,
        and the small-range fallback excluded the last element.
      * ``precision`` was read but never defined; it is now a parameter with
        a backward-compatible default of 10.

    Returns the index of ``target`` or -1 when absent.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Small range: plain linear scan (``right`` inclusive).
            for i in range(left, right + 1):
                if array[i] == target:
                    return i
            return -1
        third = (right - left) // 3
        one_third = left + third
        two_third = right - third
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list, target, precision: int = 10) -> int:
    """Recursive ternary search on sorted ``array[left..right]`` (inclusive).

    Fixes(review): same midpoint and bounds bugs as the iterative variant,
    plus ``if left < right`` silently missed single-element ranges
    (``left == right``); ``precision`` is now a defaulted parameter.

    Returns the index of ``target`` or -1 when absent.
    """
    if left > right:
        return -1
    if right - left < precision:
        # Small range: plain linear scan (``right`` inclusive).
        for i in range(left, right + 1):
            if array[i] == target:
                return i
        return -1
    third = (right - left) // 3
    one_third = left + third
    two_third = right - third
    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target, precision)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target, precision)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target, precision)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Fix(review): the original reused one clobbered name for every value and
    # then referenced `collection`/`target`/`resulta`, none of which existed.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
| 569 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase(unittest.TestCase):
    """Slow integration test for the Flax mT5 conditional-generation model.

    NOTE(review): locals were clobbered into one name in the original and the
    method name lacked the ``test`` prefix, so unittest never ran it.
    """

    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 569 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __snake_case(TestCase):
    """Tests for ``Dataset.from_list``.

    NOTE(review): the original inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE`` (``TestCase`` is the only base imported above),
    and every method shared one non-``test``-prefixed name, so unittest
    collected nothing. Names restored so the tests actually run.
    """

    def _create_example_records(self):
        # Shared fixture: four records over two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, row in enumerate(dset):
            self.assertDictEqual(row, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 388 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a ``MobileViTConfig`` for the named architecture variant.

    Fixes(review): the original wrote every config attribute to one dead
    local, and the id2label comprehension used ``int(a)`` instead of
    ``int(k)`` on the loop key.
    """
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        # Segmentation heads were trained on Pascal VOC (21 classes).
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Map an original MobileViT state-dict key to its HF-model equivalent.

    Fix(review): every ``name.replace(...)`` result was assigned to a dead
    local while the unmodified ``name`` was returned, so no renaming ever
    happened; each replacement now feeds back into ``name``.
    """
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    # The first two stages gain an extra ".layer." level ...
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    # ... while the later stages drop the inner index entirely.
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rewrite an original MobileViT state dict into HF naming in place.

    Fused qkv projection tensors are split into separate query/key/value
    entries; all other keys go through ``rename_key``. Fix(review): the
    original wrote every intermediate to one dead local and never stored the
    renamed entries back into the dict.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            # Size of one projection (query == key == value).
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image used to verify outputs.

    Fix(review): the original called ``requests.get(a, stream=a)`` with an
    undefined name; the url variable and ``stream=True`` are the evident
    arguments.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original MobileViT checkpoint to HF format, verify its
    outputs against known logits, save it, and optionally push to the hub.

    Fix(review): the original stored every intermediate in one clobbered
    local, so nothing past the first assignment was usable.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load HF model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    # Fix(review): the original assigned the parser to one name but called
    # `parser.add_argument(...)` / read `args`, both undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 388 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase(ProcessorMixin):
    """Bark-style processor: wraps an ``AutoTokenizer`` plus optional speaker
    (voice-preset) embeddings.

    NOTE(review): reconstructed from a mangled original whose base class was
    the undefined name ``_lowerCamelCase``; ``ProcessorMixin`` (imported
    above) is the evident base. Dict/attribute stores that had been collapsed
    into dead locals are restored — confirm field names against callers.
    """

    # Read by ProcessorMixin machinery.
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Required ndarray rank per voice-preset component
    # (read by `_validate_voice_preset_dict`).
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        """Load the tokenizer and, when available, the speaker-embeddings index."""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", None),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        """Save each voice preset as .npy files plus a json index, then the tokenizer."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        """Load the three npy arrays for one named voice preset; raise if incomplete."""
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", None),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        """Check that a voice-preset dict has all components with the right rank."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Tokenize ``text`` and attach the (validated) voice preset to the output."""
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            # NOTE(review): the mangled original dropped the assignment target
            # here; "history_prompt" is the evident key — confirm with callers.
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 501 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {}
class lowerCamelCase(PretrainedConfig):
    """LLaMA-style model configuration.

    NOTE(review): the original inherited from the undefined name
    ``_lowerCamelCase`` (``PretrainedConfig`` is the only config base imported
    above) and wrote every attribute to one clobbered local, leaving the
    instance empty and the validation method reading undefined names.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 501 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    """Return the maximum top-to-bottom path sum through the triangle in
    ``triangle.txt`` (Project Euler problems 18/67 style dynamic program).

    Fixes(review): the original clobbered every local into one name and
    called ``os.path.realpath`` on an undefined name instead of ``__file__``;
    the name ``solution`` is grounded by the ``print(solution())`` call below.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    # Parse each line into a row of ints.
    a = [[int(number) for number in line.strip().split(" ")] for line in triangle]

    # Bottom-up accumulation: each cell absorbs the best reachable parent.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
# Print the maximum path total when run as a script.
# NOTE(review): `solution` is not defined in this module as written (the
# function above is named `snake_case`) — confirm the intended name.
if __name__ == "__main__":
    print(solution())
| 284 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Fix(review): the original assigned every value to one clobbered name,
    # leaving `len_data`, `train_data`, `x_train`, `model`, etc. undefined.
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)

    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Sliding windows: `look_back` inputs predict the next `forward_days` values.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')

    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 179 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    # Single sigmoid unit: binary classification (matches class_mode="binary" below).
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Add a leading batch dimension: the model predicts on batches.
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 705 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (small-model) tests for the DeepFloyd IF inpainting pipeline."""

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Tiny pipeline components provided by IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs for the given device/seed."""
        if str(device).startswith('mps'):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # fp16 round-trips lose precision, hence the loose tolerance.
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2, )
| 52 | 0 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with a Maclaurin series of `accuracy` terms.

    The angle is first reduced modulo 2*pi so the series converges quickly.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms to sum (positive int)
    :return: the Maclaurin approximation of sin(theta)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')

    theta = float(theta)
    # Reduce theta into [0, 2*pi) to keep the power series well-behaved.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with a Maclaurin series of `accuracy` terms.

    The angle is first reduced modulo 2*pi so the series converges quickly.

    :param theta: angle in radians (int or float)
    :param accuracy: number of series terms to sum (positive int)
    :return: the Maclaurin approximation of cos(theta)
    :raises ValueError: if theta is not numeric or accuracy is not a positive int
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')

    theta = float(theta)
    # Reduce theta into [0, 2*pi) to keep the power series well-behaved.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    # Run any doctests embedded in this module first.
    import doctest

    doctest.testmod()
    # Spot-check the approximations over positive/negative angles and a
    # reduced accuracy of 15 terms.
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 105 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; the task classes below use it to report prediction gaps.
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """CoNLL-style NER task: one token per line, label read from one column."""

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        """Parse `{mode}.txt` in `data_dir` into InputExample objects.

        Sentences are separated by blank lines or -DOCSTART- markers.
        """
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            # Flush the trailing sentence if the file does not end with a blank line.
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write per-token predictions back out next to the input tokens."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # The tokenized sentence was longer than the model's max length.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read the label set from `path`, or fall back to the CoNLL-2003 tags."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Chunking task: reuses the NER reader but takes the second-to-last column."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read the label set from `path`, or fall back to CoNLL-2003 chunk tags."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """Part-of-speech tagging task backed by CoNLL-U files (parsed with `conllu`)."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse `{mode}.txt` (CoNLL-U format) into InputExample objects."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write each sentence with `form (upos|prediction)` annotations."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read the label set from `path`, or fall back to the UPOS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings.

    Returns a value in [0, 1]; 1.0 means identical strings. The score is the
    Jaro similarity boosted by up to 4 characters of common prefix.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the match window.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot match twice.
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the docstring examples, then show one sample similarity.
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
| 529 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the BioGPT package: names are only imported when
# first accessed (via _LazyModule), except under static type checking.
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: expose only the config/tokenizer entries.
    pass
else:
    _import_structure["modeling_biogpt"] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Eager imports so static analyzers see the real symbols.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 529 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Force deterministic torch/cuDNN behavior so the expected-slice tests below are reproducible.
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Fast unit tests for the AutoencoderKL VAE model."""

    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        # Small random batch matching the model's expected "sample" input.
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        # Minimal VAE configuration for fast tests.
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    """Slow integration tests running the Stable Diffusion VAE on fixture tensors."""

    def get_file_format(self, seed, shape):
        # Name of the .npy fixture holding a fixed gaussian-noise tensor.
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        """Load a deterministic fixture tensor onto the test device."""
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        """Load the SD VAE in eval mode, optionally from the fp16 revision."""
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            # MPS does not support device-bound generators.
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        # No generator: uses the posterior mode instead of a sampled latent.
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        # Latent is 4 channels at 1/8 spatial resolution.
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 491 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    """Unit tests for the summarization preprocessing helpers in utils_summarization."""

    def setUp(self):
        # Fixed block size shared by the truncate/pad tests.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than the block size are right-padded with the pad value."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences that already match the block size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than the block size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields an empty summary list."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lists."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        """@highlight sections are split off into the summary list."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        # Token type flips at every separator token (101 here).
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 491 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowerCAmelCase = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _SCREAMING_SNAKE_CASE :
def __init__( self : Any , a__ : int , a__ : str=16 , a__ : List[Any]=13 , a__ : List[str]=7 , a__ : Tuple=14 , a__ : Tuple=10 , a__ : Tuple=19 , a__ : List[Any]=5 , a__ : Union[str, Any]=4 , a__ : Optional[Any]=True , a__ : Dict=16 , a__ : List[Any]=2 , a__ : Any=4 , a__ : Dict=4 , a__ : Optional[Any]="gelu" , a__ : Union[str, Any]=0.1 , a__ : List[Any]=0.1 , a__ : int=[1, 2, 3, 4, 5] , a__ : Dict=25 , a__ : Optional[int]=5 , ):
__magic_name__ = d_model
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = prediction_length
__magic_name__ = context_length
__magic_name__ = cardinality
__magic_name__ = num_time_features
__magic_name__ = lags_sequence
__magic_name__ = embedding_dimension
__magic_name__ = is_training
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = context_length
__magic_name__ = prediction_length + label_length
__magic_name__ = label_length
__magic_name__ = moving_average
__magic_name__ = autocorrelation_factor
def snake_case__ ( self : Any ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def snake_case__ ( self : Optional[int] , a__ : List[str] ):
__magic_name__ = config.context_length + max(config.lags_sequence )
__magic_name__ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__magic_name__ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__magic_name__ = floats_tensor([self.batch_size, _past_length] )
__magic_name__ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__magic_name__ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__magic_name__ = floats_tensor([self.batch_size, config.prediction_length] )
__magic_name__ = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def snake_case__ ( self : Any ):
__magic_name__ = self.get_config()
__magic_name__ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def snake_case__ ( self : Dict ):
__magic_name__ = self.prepare_config_and_inputs()
return config, inputs_dict
    def snake_case__ ( self : Optional[int] , a__ : Any , a__ : Any ):
        # Round-trips the encoder and decoder through save/from_pretrained and
        # checks both standalone halves reproduce the full model's hidden
        # states to within 1e-3.
        # NOTE(review): the signature declares ``a__`` twice (a SyntaxError),
        # every local is rebound to ``__magic_name__``, and later lines read
        # the original descriptive names (``model``, ``feature``, ...) which
        # are never defined here.  Code left byte-identical.
        __magic_name__ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
        __magic_name__ = model(**UpperCamelCase__ )
        __magic_name__ = outputs.encoder_last_hidden_state
        __magic_name__ = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = model.get_encoder()
            encoder.save_pretrained(UpperCamelCase__ )
            __magic_name__ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
        __magic_name__ = model.create_network_inputs(**UpperCamelCase__ )
        # split the context window into its seasonal and trend components
        __magic_name__ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        __magic_name__ = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __magic_name__ = encoder(inputs_embeds=UpperCamelCase__ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        # build decoder seeds: context mean repeated over the horizon + zeros
        __magic_name__ = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __magic_name__ = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        __magic_name__ = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __magic_name__ = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = model.get_decoder()
            decoder.save_pretrained(UpperCamelCase__ )
            __magic_name__ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
        __magic_name__ = decoder(
            trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
    """Common model/pipeline test suite for the Autoformer models.

    NOTE(review): the scrambling collapsed every class attribute onto the
    single name ``__SCREAMING_SNAKE_CASE`` (each assignment overwrites the
    previous one), every method onto ``snake_case__``, and every local onto
    ``__magic_name__`` while later lines read the original descriptive
    names.  Code left byte-identical.
    """

    __SCREAMING_SNAKE_CASE :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE :Optional[int] = (AutoformerForPrediction,) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE :Dict = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    __SCREAMING_SNAKE_CASE :Optional[Any] = False
    __SCREAMING_SNAKE_CASE :int = False
    __SCREAMING_SNAKE_CASE :Optional[int] = False
    __SCREAMING_SNAKE_CASE :Optional[Any] = False
    __SCREAMING_SNAKE_CASE :Any = False
    __SCREAMING_SNAKE_CASE :int = False
    def snake_case__ ( self : Optional[Any] ):
        # set up the model tester and config tester
        __magic_name__ = AutoformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
    def snake_case__ ( self : Optional[int] ):
        self.config_tester.run_common_tests()
    def snake_case__ ( self : Optional[int] ):
        # save/reload round-trip: no missing keys expected
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCamelCase__ )
                __magic_name__ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
            self.assertEqual(info['''missing_keys'''] , [] )
    def snake_case__ ( self : str ):
        __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
    @unittest.skip(reason='''Model has no tokens embeddings''' )
    def snake_case__ ( self : List[str] ):
        pass
    def snake_case__ ( self : List[Any] ):
        # main_input_name must match the first forward argument after `self`
        __magic_name__ = inspect.signature(getattr(UpperCamelCase__ , '''forward''' ) )
        # The main input is the name of the argument after `self`
        __magic_name__ = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
    def snake_case__ ( self : int ):
        # forward-signature argument order check
        __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            __magic_name__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ = [*signature.parameters.keys()]
            __magic_name__ = [
                '''past_values''',
                '''past_time_features''',
                '''past_observed_mask''',
                '''static_categorical_features''',
                '''static_real_features''',
                '''future_values''',
                '''future_time_features''',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('''future_observed_mask''' )
            expected_arg_names.extend(
                [
                    '''decoder_attention_mask''',
                    '''head_mask''',
                    '''decoder_head_mask''',
                    '''cross_attn_head_mask''',
                    '''encoder_outputs''',
                    '''past_key_values''',
                    '''output_hidden_states''',
                    '''output_attentions''',
                    '''use_cache''',
                    '''return_dict''',
                ] )
            self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
    def snake_case__ ( self : str ):
        # attention-output shapes for encoder, decoder and cross attention
        __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = True
        __magic_name__ = getattr(self.model_tester , '''seq_length''' , UpperCamelCase__ )
        __magic_name__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCamelCase__ )
        __magic_name__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCamelCase__ )
        __magic_name__ = getattr(self.model_tester , '''d_model''' , UpperCamelCase__ )
        __magic_name__ = getattr(self.model_tester , '''num_attention_heads''' , UpperCamelCase__ )
        __magic_name__ = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            __magic_name__ = True
            __magic_name__ = False
            __magic_name__ = True
            __magic_name__ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            __magic_name__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __magic_name__ = True
            __magic_name__ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            __magic_name__ = outputs.encoder_attentions
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            __magic_name__ = len(UpperCamelCase__ )
            __magic_name__ = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
            # decoder attentions
            __magic_name__ = outputs.decoder_attentions
            self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            __magic_name__ = outputs.cross_attentions
            self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
            self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        __magic_name__ = True
        __magic_name__ = True
        __magic_name__ = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
        __magic_name__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def snake_case__ ( self : List[str] ):
        super().test_retain_grad_hidden_states_attentions()
def UpperCamelCase ( a="train-batch.pt" ) -> str:
    """Download a recorded Autoformer test batch from the Hub and load it.

    Fixes the scrambled body: the ``a`` parameter was ignored (the undefined
    name ``__UpperCamelCase`` was passed instead) and both results were
    bound to the same ``__magic_name__`` local, losing the downloaded path
    before ``torch.load``.
    """
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=a , repo_type='''dataset''' )
    # NOTE(review): loaded onto the test device used elsewhere in this file.
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests against the ``huggingface/autoformer-tourism-monthly`` checkpoint.

    NOTE(review): every local is rebound to ``__magic_name__`` and the
    device / tolerance arguments were replaced with the undefined name
    ``UpperCamelCase__``.  Code left byte-identical.
    """

    def snake_case__ ( self : Union[str, Any] ):
        # forward pass of the bare model on a recorded training batch
        __magic_name__ = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
        __magic_name__ = prepare_batch()
        with torch.no_grad():
            __magic_name__ = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        __magic_name__ = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        __magic_name__ = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=UpperCamelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
    def snake_case__ ( self : Union[str, Any] ):
        # encoder-only hidden-state check on a validation batch
        __magic_name__ = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
        __magic_name__ = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            __magic_name__ = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        __magic_name__ = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        __magic_name__ = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=UpperCamelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
    def snake_case__ ( self : int ):
        # generation check: mean of sampled sequences vs recorded reference
        __magic_name__ = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
        __magic_name__ = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            __magic_name__ = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        __magic_name__ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
        __magic_name__ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
        __magic_name__ = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1E-1 ) )
| 711 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( a=None , a=None ) -> Union[str, Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=a )
@dataclass
class _SCREAMING_SNAKE_CASE :
    """Deprecated CLI arguments for the Hugging Face benchmarking utilities.

    NOTE(review): the scrambling collapsed every field onto the single name
    ``__SCREAMING_SNAKE_CASE`` (later assignments overwrite earlier ones),
    replaced every boolean default with the undefined name ``__a``, and the
    ``warnings.warn`` category below is the undefined ``a__``.  Properties
    also read ``self.models`` / ``self.multi_process`` / ``self.is_tpu``
    which no surviving field defines.  Code left byte-identical.
    """

    __SCREAMING_SNAKE_CASE :List[str] = list_field(
        default=[] ,metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } ,)
    __SCREAMING_SNAKE_CASE :List[int] = list_field(
        default=[8] ,metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
    __SCREAMING_SNAKE_CASE :List[int] = list_field(
        default=[8, 32, 128, 512] ,metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} ,)
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} ,)
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} ,)
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Use FP16 to accelerate inference."""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Benchmark training of model"""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Verbose memory tracing"""} )
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} ,)
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } ,)
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Trace memory line by line"""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Save result to a CSV file"""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Save all print statements in a log file"""} )
    __SCREAMING_SNAKE_CASE :bool = field(default=__a ,metadata={"""help""": """Whether to print environment information"""} )
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } ,)
    # The CSV/log filename defaults below embed the wall-clock time at import.
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''inference_time_{round(time() )}.csv''' ,metadata={"""help""": """CSV filename used if saving time results to csv."""} ,)
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''inference_memory_{round(time() )}.csv''' ,metadata={"""help""": """CSV filename used if saving memory results to csv."""} ,)
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''train_time_{round(time() )}.csv''' ,metadata={"""help""": """CSV filename used if saving time results to csv for training."""} ,)
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''train_memory_{round(time() )}.csv''' ,metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} ,)
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''env_info_{round(time() )}.csv''' ,metadata={"""help""": """CSV filename used if saving environment information."""} ,)
    __SCREAMING_SNAKE_CASE :str = field(
        default=f'''log_{round(time() )}.csv''' ,metadata={"""help""": """Log filename used if print statements are saved in log."""} ,)
    __SCREAMING_SNAKE_CASE :int = field(default=3 ,metadata={"""help""": """Times an experiment will be run."""} )
    __SCREAMING_SNAKE_CASE :bool = field(
        default=__a ,metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } ,)
    def snake_case__ ( self : Union[str, Any] ):
        # emit the deprecation warning once the arguments are constructed
        warnings.warn(
            F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , a__ , )
    def snake_case__ ( self : Dict ):
        # serialize all fields to a pretty-printed JSON string
        return json.dumps(dataclasses.asdict(self ) , indent=2 )
    @property
    def snake_case__ ( self : Dict ):
        # validated accessor: at least one model identifier is required
        if len(self.models ) <= 0:
            raise ValueError(
                '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
                ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
        return self.models
    @property
    def snake_case__ ( self : Dict ):
        # multiprocessing is disabled on TPU or when explicitly turned off
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('''Multiprocessing is currently not possible on TPU.''' )
            return False
        else:
            return True
| 245 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
# NOTE(review): every constant below is rebound to the same scrambled name
# ``__snake_case``, so the dict at the end references names (CLS, SEP, BOS,
# MASK, PAD, RESERVED) that are never defined under those identifiers here.
__snake_case = {
    """nielsr/canine-s""": 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
__snake_case = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__snake_case = 0
__snake_case = 0XE0_00
__snake_case = 0XE0_01
__snake_case = 0XE0_02
__snake_case = 0XE0_03
__snake_case = 0XE0_04
# Maps special codepoints to human-readable names.
__snake_case = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__snake_case = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCAmelCase_ ( lowercase ):
    """Character-level (CANINE-style) tokenizer operating directly on Unicode codepoints.

    NOTE(review): the ``__init__`` defaults below are ``chr(SCREAMING_SNAKE_CASE_)``
    where ``SCREAMING_SNAKE_CASE_`` is the very parameter being declared (and
    the same name is declared for every parameter), which cannot compile;
    the originals were ``chr(CLS)``, ``chr(SEP)`` etc.  Code left
    byte-identical.
    """

    UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=chr(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2048 , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
        # wrap every special token string into an AddedToken
        UpperCamelCase :Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
        UpperCamelCase :Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
        UpperCamelCase :Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
        UpperCamelCase :int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
        UpperCamelCase :Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase :Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , model_max_length=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Creates a mapping for looking up the IDs of special symbols.
        UpperCamelCase :Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            UpperCamelCase :int = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        UpperCamelCase :Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        UpperCamelCase :Tuple = UNICODE_VOCAB_SIZE
        UpperCamelCase :Union[str, Any] = len(self._special_codepoints )
    @property
    def UpperCAmelCase ( self ) -> int:
        # vocabulary size is the full Unicode codepoint space
        return self._unicode_vocab_size
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
        # tokenize a string into its individual characters
        return list(SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> int:
        # a token's id is simply its Unicode codepoint
        try:
            return ord(SCREAMING_SNAKE_CASE_ )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> str:
        # special codepoints render as their symbolic names, e.g. "[CLS]"
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(SCREAMING_SNAKE_CASE_ )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
        # detokenize by concatenating the characters back together
        return "".join(SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # [CLS] seq_a [SEP] (seq_b [SEP])
        UpperCamelCase :Dict = [self.sep_token_id]
        UpperCamelCase :Union[str, Any] = [self.cls_token_id]
        UpperCamelCase :Optional[Any] = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
        # 1 marks special tokens, 0 marks sequence tokens
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
        return result
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # segment ids: 0 for the first sequence, 1 for the second
        UpperCamelCase :int = [self.sep_token_id]
        UpperCamelCase :List[str] = [self.cls_token_id]
        UpperCamelCase :Optional[int] = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result
    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Optional[Any]:
        # CANINE has no vocabulary file to save
        return ()
| 658 |
"""Lazy-import ``__init__`` for the LLaMA model family.

Fixes the scrambled module: every assignment was rebound to the single name
``__snake_case`` (so the optional-backend name lists were never recorded in
the import structure), the final ``_LazyModule`` call referenced the
undefined name ``_import_structure``, and its result was never installed in
``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names it provides; optional backends are
# appended below only when their dependency is importable.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrored from the structure above.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class _UpperCAmelCase :
def __init__( self , lowercase_ = None ) -> List[str]:
UpperCAmelCase = value
UpperCAmelCase = None # Added in order to delete a node easier
UpperCAmelCase = None
UpperCAmelCase = None
def __repr__( self ) -> List[str]:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"{self.value}": (self.left, self.right)} , indent=1 )
class _UpperCAmelCase :
    """Binary search tree (smaller values left, larger-or-equal right).

    NOTE(review): this class shares its scrambled name with the node class
    above; every local is rebound to ``UpperCAmelCase`` while later lines
    read the original descriptive names, and it calls ``Node``,
    ``self.__reassign_nodes`` and ``self.__insert`` which are not defined
    under those names in this file.  Code left byte-identical.
    """

    def __init__( self , lowercase_ = None ) -> List[Any]:
        # intended: store ``lowercase_`` as the root node
        UpperCAmelCase = root
    def __str__( self ) -> Optional[Any]:
        return str(self.root )
    def a_ ( self , lowercase_ , lowercase_ ) -> int:
        # intended: splice ``new_children`` into the position held by ``node``
        if new_children is not None: # reset its kids
            UpperCAmelCase = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(lowercase_ ): # If it is the right children
                UpperCAmelCase = new_children
            else:
                UpperCAmelCase = new_children
        else:
            UpperCAmelCase = new_children
    def a_ ( self , lowercase_ ) -> Tuple:
        # True when ``node`` is its parent's right child
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def a_ ( self ) -> int:
        # empty-tree predicate
        return self.root is None
    def a_ ( self , lowercase_ ) -> List[Any]:
        # standard iterative BST insert: walk down until a free leaf slot
        UpperCAmelCase = Node(lowercase_ ) # create a new Node
        if self.empty(): # if Tree is empty
            UpperCAmelCase = new_node # set its root
        else: # Tree is not empty
            UpperCAmelCase = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        UpperCAmelCase = new_node # We insert the new node in a leaf
                        break
                    else:
                        UpperCAmelCase = parent_node.left
                else:
                    if parent_node.right is None:
                        UpperCAmelCase = new_node
                        break
                    else:
                        UpperCAmelCase = parent_node.right
            UpperCAmelCase = parent_node
    def a_ ( self , *lowercase_ ) -> Optional[int]:
        # bulk insert of all provided values
        for value in values:
            self.__insert(lowercase_ )
    def a_ ( self , lowercase_ ) -> Dict:
        # iterative search; returns the node with the label or None
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            UpperCAmelCase = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                UpperCAmelCase = node.left if value < node.value else node.right
            return node
    def a_ ( self , lowercase_ = None ) -> Tuple:
        # maximum lives at the right-most node
        if node is None:
            if self.root is None:
                return None
            UpperCAmelCase = self.root
        if not self.empty():
            while node.right is not None:
                UpperCAmelCase = node.right
        return node
    def a_ ( self , lowercase_ = None ) -> List[str]:
        # minimum lives at the left-most node
        if node is None:
            UpperCAmelCase = self.root
        if self.root is None:
            return None
        if not self.empty():
            UpperCAmelCase = self.root
            while node.left is not None:
                UpperCAmelCase = node.left
        return node
    def a_ ( self , lowercase_ ) -> Any:
        # delete by label: leaf / one-child / two-children (max-of-left) cases
        UpperCAmelCase = self.search(lowercase_ ) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(lowercase_ , lowercase_ )
            elif node.left is None: # Has only right children
                self.__reassign_nodes(lowercase_ , node.right )
            elif node.right is None: # Has only left children
                self.__reassign_nodes(lowercase_ , node.left )
            else:
                UpperCAmelCase = self.get_max(
                    node.left ) # Gets the max value of the left branch
                self.remove(tmp_node.value ) # type: ignore
                UpperCAmelCase = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure
    def a_ ( self , lowercase_ ) -> Optional[Any]:
        # generator yielding nodes in pre-order (root, left, right)
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def a_ ( self , lowercase_=None ) -> str:
        # traverse with a caller-supplied function, defaulting to pre-order
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def a_ ( self , lowercase_ , lowercase_ ) -> str:
        # in-order traversal appending values into ``arr``
        if node:
            self.inorder(lowercase_ , node.left )
            arr.append(node.value )
            self.inorder(lowercase_ , node.right )
    def a_ ( self , lowercase_ , lowercase_ ) -> Tuple:
        # k-th smallest via in-order traversal (1-indexed)
        UpperCAmelCase = []
        self.inorder(lowercase_ , lowercase_ ) # append all values to list using inorder traversal
        return arr[k - 1]
def lowercase__ ( lowerCAmelCase ):
    """Return the post-order (left, right, root) list of nodes under ``lowerCAmelCase``.

    Fixes the scrambled body: the result was bound to a throw-away local,
    the return statement read the undefined name ``node_list``, the
    recursion went through the undefined name ``postorder`` and the
    condition read the undefined ``curr_node``; the ``Optional``/``List``
    annotations also referenced names this file never imports, so they are
    dropped.
    """
    node_list = []
    if lowerCAmelCase is not None:
        node_list = lowercase__(lowerCAmelCase.left ) + lowercase__(lowerCAmelCase.right ) + [lowerCAmelCase]
    return node_list
def lowercase__ ( ) -> Any:
    """Demo driver: build a BST, search, print min/max, then delete all nodes.

    NOTE(review): locals are rebound to ``UpperCAmelCase`` while the loop
    reads the undefined names ``testlist``/``t``, and ``BinarySearchTree`` /
    ``_snake_case`` are not defined under those names in this file.  Code
    left byte-identical.
    """
    UpperCAmelCase = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    UpperCAmelCase = BinarySearchTree()
    for i in testlist:
        t.insert(_snake_case )
    # Prints all the elements of the list in order traversal
    print(_snake_case )
    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )
    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )
    if not t.empty():
        print('Max Value: ' , t.get_max().value ) # type: ignore
        print('Min Value: ' , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(_snake_case )
    print(_snake_case )
if __name__ == "__main__":
    # run the module's doctests when executed as a script
    import doctest
    doctest.testmod(verbose=True)
| 708 |
"""simple docstring"""
import sys
import turtle
def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase_a : tuple[float, float] ) -> tuple[float, float]:
    """Return the component-wise midpoint of two 2-D points.

    Fixes the scrambled signature: the same parameter name was declared
    twice (a ``SyntaxError``) and the body averaged the undefined name
    ``pa`` with itself, which would always return the first point.
    """
    return (lowerCAmelCase[0] + lowerCAmelCase_a[0]) / 2, (lowerCAmelCase[1] + lowerCAmelCase_a[1]) / 2
def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : int , ) -> None:
    """Recursively draw a Sierpinski triangle with the global turtle pen.

    NOTE(review): all four parameters share the name ``lowerCAmelCase`` (a
    ``SyntaxError``) and the body reads the undefined names ``vertexa`` /
    ``depth`` while recursing through ``get_mid`` defined above under a
    different name; the intended parameters were the three vertices plus
    the recursion depth.  Code left byte-identical.
    """
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    # recurse on the three half-size corner triangles
    triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
    triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
    triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
if __name__ == "__main__":
    # CLI entry point: expects a single integer recursion depth argument.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # configure the global pen used by the drawing routine above
    SCREAMING_SNAKE_CASE_ = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    # NOTE(review): the pen is bound to ``SCREAMING_SNAKE_CASE_`` but read
    # as ``my_pen``/``vertices``/``triangle`` below — scrambled names.
    SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 183 | 0 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _snake_case ( a__ ):
    """CLIP-vision-based image encoder used by the Paint-by-Example pipeline.

    NOTE(review): ``__init__`` declares ``_lowerCamelCase`` twice (a
    ``SyntaxError``) and every attribute assignment is rebound to a local
    ``UpperCAmelCase__`` name, so ``self.model``/``self.mapper`` etc. are
    never actually set.  Code left byte-identical.
    """

    def __init__( self , _lowerCamelCase , _lowerCamelCase=768):
        super().__init__(_lowerCamelCase)
        UpperCAmelCase__ : List[Any] = proj_size
        UpperCAmelCase__ : Optional[int] = CLIPVisionModel(_lowerCamelCase)
        UpperCAmelCase__ : List[Any] = PaintByExampleMapper(_lowerCamelCase)
        UpperCAmelCase__ : Dict = nn.LayerNorm(config.hidden_size)
        UpperCAmelCase__ : List[str] = nn.Linear(config.hidden_size , self.proj_size)
        # uncondition for scaling
        UpperCAmelCase__ : Any = nn.Parameter(torch.randn((1, 1, self.proj_size)))
    def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False):
        # encode pixels with CLIP, map the pooled output, normalize, project
        UpperCAmelCase__ : Optional[int] = self.model(pixel_values=_lowerCamelCase)
        UpperCAmelCase__ : Dict = clip_output.pooler_output
        UpperCAmelCase__ : List[Any] = self.mapper(latent_states[:, None])
        UpperCAmelCase__ : List[Any] = self.final_layer_norm(_lowerCamelCase)
        UpperCAmelCase__ : Optional[int] = self.proj_out(_lowerCamelCase)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small stack of transformer blocks refining the pooled CLIP embedding."""

    def __init__(self, config):
        super().__init__()
        # One block per ~5 hidden layers of the underlying vision model.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps a model type name to its (config class, masked/causal LM class, tokenizer class).
# Consumed by main() to instantiate the student and teacher.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Validate the parsed CLI arguments before any heavy work starts.

    Checks that the loss weights, student/teacher type combination, and
    referenced files are mutually consistent; raises ``AssertionError``
    otherwise.
    """
    # MLM and CLM objectives are mutually exclusive.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Only same-type distillation or BERT -> DistilBERT is supported.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights must be non-negative and at least one must be active.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the student's positional embedding matrix (no gradient updates)."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Freeze the student's token-type (segment) embeddings, if the model has them."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse CLI arguments, load teacher/student/data, and run distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether"
                    " to overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    # Tokenizer is only needed from the teacher side; the student shares its vocab.
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rare tokens are up-weighted for masking (XLM-style smoothing).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks.

    Dijkstra's rules: digits go on the operand stack, operators on the
    operator stack, '(' is ignored, and ')' pops one operator and two
    operands, pushing the result back. Only single-digit operands are
    supported (each character is handled independently).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operand -> operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operator -> operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: apply the top operator to the top two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            # num2 was pushed first, so it is the left-hand operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the value of the expression.
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 160 |
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (iterative Euclid)."""
    while a != 0:
        a, b = b % a, a
    return b
def mod_inverse(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm. Raises ``ValueError`` when the
    inverse does not exist (i.e. when gcd(a, m) != 1).
    """
    # math.gcd is always non-negative, so the existence test is also
    # correct for negative `a` (unlike a naive Euclid that can return -1).
    import math

    if math.gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)

    # Extended Euclid: invariants u1*a0 + u2*m0 ≡ u3 and v1*a0 + v2*m0 ≡ v3.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 160 | 1 |
"""simple docstring"""
def solution() -> int:
    """Project Euler 40: Champernowne's constant.

    Build at least one million digits of 0.123456789101112... and return the
    product d1 * d10 * d100 * ... * d1000000 of the decimal digits at those
    (1-indexed) positions.
    """
    constant = []
    i = 1

    # Appending 1e6 numbers guarantees well over 1e6 digits in total.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 690 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL, for the public ConvBERT models.
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a ConvBERT model.

    Mirrors BERT's hyper-parameters and adds the ConvBERT-specific
    ``embedding_size``, ``head_ratio``, ``conv_kernel_size`` and
    ``num_groups`` (span-based dynamic convolution settings).
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 690 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader backed by a PySpark DataFrame.

    Wraps the ``Spark`` packaged builder so a DataFrame can be materialized
    (or streamed) as a ``datasets`` dataset.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset, streaming it or preparing it on disk first."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download (re-preparation) when cache reuse is disabled.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol.

    Alice prepares random qubits in random bases, Bob measures in random
    bases, and the bits where both chose the same basis form the shared key.

    :param key_len: desired length of the returned key.
    :param seed: RNG / simulator seed for reproducibility.
    :return: the generated key as a bit string of length ``key_len``.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    return gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod

    testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model.

    Scores each candidate label with the hypothesis "This example is
    <label>" and returns the label whose entailment logit is highest.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Locate the "entailment" class in the NLI model's label map.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        # One (premise, hypothesis) pair per candidate label.
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        # Column 2 holds the entailment logits for MNLI-style heads.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 633 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation pipeline using the Karras et al. (2022)
    stochastic sampler (Algorithm 2 with the VE preconditioning, eqs. 212-213).
    """

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images; returns PIL images by default."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] back to [0, 1] and to HWC numpy.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps: int,
    max_beta: float = 0.999,
    alpha_transform_type: str = "cosine",
) -> torch.Tensor:
    """Create a beta schedule from a cumulative-alpha ("alpha bar") function.

    :param num_diffusion_timesteps: number of betas to produce.
    :param max_beta: clamp ceiling for each beta (avoids singularities).
    :param alpha_transform_type: "cosine" (Nichol & Dhariwal) or "exp".
    :return: float32 tensor of ``num_diffusion_timesteps`` betas.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clamped.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """KDPM2 ("DPM-Solver-2"-style) discrete scheduler inspired by
    Karras et al. (2022); a second-order sampler that alternates between a
    first-order step and a correction at an interpolated sigma.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self , a = 1000 , a = 0.0_00_85 , a = 0.0_12 , a = "linear" , a = None , a = "epsilon" , a = "linspace" , a = 0 , ) -> Any:
if trained_betas is not None:
SCREAMING_SNAKE_CASE = torch.tensor(a , dtype=torch.floataa)
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE = torch.linspace(a , a , a , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE = betas_for_alpha_bar(a)
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''')
SCREAMING_SNAKE_CASE = 1.0 - self.betas
SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(a , a , a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> Any:
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE = self.timesteps
SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
SCREAMING_SNAKE_CASE = 1 if len(a) > 1 else 0
else:
SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(a) else timestep
SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE__ ( self , a , a , ) -> torch.FloatTensor:
SCREAMING_SNAKE_CASE = self.index_for_timestep(a)
if self.state_in_first_order:
SCREAMING_SNAKE_CASE = self.sigmas[step_index]
else:
SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , ) -> int:
SCREAMING_SNAKE_CASE = num_inference_steps
SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a)[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (np.arange(0 , a) * step_ratio).round()[::-1].copy().astype(a)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (np.arange(a , 0 , -step_ratio)).round().copy().astype(a)
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''')
SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(a)).to(a)
SCREAMING_SNAKE_CASE = np.interp(a , np.arange(0 , len(a)) , a)
SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
SCREAMING_SNAKE_CASE = torch.from_numpy(a).to(device=a)
# interpolate sigmas
SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
SCREAMING_SNAKE_CASE = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(a).startswith('mps'):
# mps does not support float64
SCREAMING_SNAKE_CASE = torch.from_numpy(a).to(a , dtype=torch.floataa)
else:
SCREAMING_SNAKE_CASE = torch.from_numpy(a).to(a)
# interpolate timesteps
SCREAMING_SNAKE_CASE = self.sigma_to_t(a).to(a , dtype=timesteps.dtype)
SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps])
SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE = defaultdict(a)
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
        """Map a noise level to a fractional timestep index by linear
        interpolation in log-sigma space against ``self.log_sigmas``.

        NOTE(review): the parameter is declared ``a`` but the body reads a
        free variable ``sigma``, and every assignment rebinds the same local
        ``SCREAMING_SNAKE_CASE`` -- the identifiers appear machine-mangled
        (the return is a tensor, not a Tuple); restore from the upstream
        scheduler source before relying on this.
        """
        # get log sigma
        SCREAMING_SNAKE_CASE = sigma.log()
        # signed distance from this log-sigma to every tabulated log-sigma
        SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None]
        # lower bracketing index, clamped so the +1 neighbour stays in range
        SCREAMING_SNAKE_CASE = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        SCREAMING_SNAKE_CASE = low_idx + 1
        SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx]
        SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx]
        # interpolation weight between the two bracketing log-sigmas
        SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
        SCREAMING_SNAKE_CASE = w.clamp(0 , 1)
        # transform interpolation to time range
        SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
        SCREAMING_SNAKE_CASE = t.view(sigma.shape)
        return t
    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        """True while no intermediate sample is cached, i.e. the next `step`
        call takes the first-order branch (returns a bool despite the mangled
        annotation; `step` below sets/clears `self.sample`)."""
        return self.sample is None
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a = True , ) -> Union[SchedulerOutput, Tuple]:
        """One denoising step of a two-stage (KDPM2-style) sampler,
        alternating a first-order and a second-order update depending on
        `state_in_first_order`.

        NOTE(review): every parameter is declared ``a`` (duplicate argument
        names are a SyntaxError) and the body reads free names such as
        ``timestep``, ``model_output`` and ``sample`` -- identifiers look
        machine-mangled; restore from the upstream scheduler source.
        """
        SCREAMING_SNAKE_CASE = self.index_for_timestep(a)
        # advance index counter by 1
        SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(a) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            SCREAMING_SNAKE_CASE = self.sigmas[step_index]
            SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1]
            SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
            SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
            SCREAMING_SNAKE_CASE = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        SCREAMING_SNAKE_CASE = 0
        SCREAMING_SNAKE_CASE = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
            SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
            SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample')
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''')
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat
            # store for 2nd order step
            SCREAMING_SNAKE_CASE = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
            SCREAMING_SNAKE_CASE = self.sample
            SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=a)
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , ) -> torch.FloatTensor:
        """Diffuse clean samples to the noise level of the given timesteps:
        noisy = original + noise * sigma[t].

        NOTE(review): parameters are all declared ``a`` (duplicate names are a
        SyntaxError) while the body reads ``original_samples``, ``noise`` and
        ``timesteps`` -- identifiers look machine-mangled.
        """
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(a):
            # mps does not support float64
            SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.floataa)
            SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.floataa)
        else:
            SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device)
            SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device)
        SCREAMING_SNAKE_CASE = [self.index_for_timestep(a , a) for t in timesteps]
        SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
        # broadcast sigma over the trailing sample dimensions
        while len(sigma.shape) < len(original_samples.shape):
            SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1)
        SCREAMING_SNAKE_CASE = original_samples + noise * sigma
        return noisy_samples
    def __len__( self) -> Optional[int]:
        """Length of the scheduler == configured number of training timesteps."""
        return self.config.num_train_timesteps
| 702 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _snake_case :
    """Fixture/helper that builds tiny ESM configs and dummy inputs for the
    TF model tests below.

    NOTE(review): identifiers are machine-mangled -- every assignment binds a
    local named SCREAMING_SNAKE_CASE instead of the attribute it was meant to
    set (e.g. self.batch_size), parameters are all named ``a`` (several defs
    below declare duplicate ``a`` parameters, which is a SyntaxError), and
    `self.prepare_config_and_inputs` is called although no method carries
    that name.  Restore from the upstream test module before use.
    """
    def __init__( self , a , ) -> Tuple:
        # Intended to store the parent test case plus the tiny model
        # hyper-parameters (batch size 13, seq length 7, vocab 99, ...).
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = 13
        SCREAMING_SNAKE_CASE = 7
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = 99
        SCREAMING_SNAKE_CASE = 32
        SCREAMING_SNAKE_CASE = 2
        SCREAMING_SNAKE_CASE = 4
        SCREAMING_SNAKE_CASE = 37
        SCREAMING_SNAKE_CASE = 'gelu'
        SCREAMING_SNAKE_CASE = 0.1
        SCREAMING_SNAKE_CASE = 0.1
        SCREAMING_SNAKE_CASE = 512
        SCREAMING_SNAKE_CASE = 16
        SCREAMING_SNAKE_CASE = 2
        SCREAMING_SNAKE_CASE = 0.02
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = 4
        SCREAMING_SNAKE_CASE = None
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        # Builds (config, input_ids, input_mask, labels...) for one test run.
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        SCREAMING_SNAKE_CASE = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
        SCREAMING_SNAKE_CASE = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Decoder variant: adds encoder_hidden_states / encoder_attention_mask.
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> List[str]:
        # Exercises TFEsmModel with dict, list and positional calling styles.
        SCREAMING_SNAKE_CASE = TFEsmModel(config=a)
        SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
        SCREAMING_SNAKE_CASE = model(a)
        SCREAMING_SNAKE_CASE = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE = model(a)
        SCREAMING_SNAKE_CASE = model(a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , ) -> str:
        # Decoder / cross-attention path, with and without encoder outputs.
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = TFEsmModel(config=a)
        SCREAMING_SNAKE_CASE = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        SCREAMING_SNAKE_CASE = model(a)
        SCREAMING_SNAKE_CASE = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE = model(a , encoder_hidden_states=a)
        # Also check the case where encoder outputs are not passed
        SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Optional[int]:
        # Masked-LM head: logits shaped (batch, seq, vocab).
        SCREAMING_SNAKE_CASE = TFEsmForMaskedLM(config=a)
        SCREAMING_SNAKE_CASE = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Dict:
        # Token-classification head: logits shaped (batch, seq, num_labels).
        SCREAMING_SNAKE_CASE = self.num_labels
        SCREAMING_SNAKE_CASE = TFEsmForTokenClassification(config=a)
        SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
        SCREAMING_SNAKE_CASE = model(a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Splits prepared fixtures into (config, inputs_dict) for common tests.
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _snake_case ( A__ , A__ , unittest.TestCase ):
    """Common-test suite wiring for the TF ESM model family.

    NOTE(review): the two base classes ``A__`` are undefined in this file
    (presumably TFModelTesterMixin and PipelineTesterMixin, imported above),
    `TFEsmModelTester` in setUp is likewise undefined (the tester class above
    was renamed `_snake_case`), and the four `_lowercase` class attributes
    all rebind the same mangled name.  Restore identifiers from the upstream
    test module before running.
    """
    _lowercase : str = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    _lowercase : int = (
        {
            '''feature-extraction''': TFEsmModel,
            '''fill-mask''': TFEsmForMaskedLM,
            '''text-classification''': TFEsmForSequenceClassification,
            '''token-classification''': TFEsmForTokenClassification,
            '''zero-shot''': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : str = False
    _lowercase : Tuple = False
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # setUp: build the shared model tester and config tester fixtures.
        SCREAMING_SNAKE_CASE = TFEsmModelTester(self)
        SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        # Run the generic config sanity checks.
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a)
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*a)
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*a)
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*a)
    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = TFEsmModel.from_pretrained(a)
            self.assertIsNotNone(a)
    @unittest.skip('Protein models do not support embedding resizing.')
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        pass
    @unittest.skip('Protein models do not support embedding resizing.')
    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        pass
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        # Input embeddings must be Keras layers; the MLM head exposes its
        # bias as a dict of tf.Variable instead of an output-embedding layer.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(a)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                SCREAMING_SNAKE_CASE = model.get_bias()
                assert isinstance(a , a)
                for k, v in name.items():
                    assert isinstance(a , tf.Variable)
            else:
                SCREAMING_SNAKE_CASE = model.get_output_embeddings()
                assert x is None
                SCREAMING_SNAKE_CASE = model.get_bias()
                assert name is None
@require_tf
class _snake_case ( unittest.TestCase ):
    """Slow integration tests comparing TF ESM outputs against recorded
    reference slices from the facebook/esm2_t6_8M_UR50D checkpoint."""
    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # Masked-LM logits: check output shape and a 3x3 reference slice.
        SCREAMING_SNAKE_CASE = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
        SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]])
        SCREAMING_SNAKE_CASE = model(a)[0]
        SCREAMING_SNAKE_CASE = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape) , a)
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2))
    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        # Base model last_hidden_state: check a 3x3 reference slice.
        SCREAMING_SNAKE_CASE = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        SCREAMING_SNAKE_CASE = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        SCREAMING_SNAKE_CASE = model(a)[0]
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 444 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same mangled name
# `__magic_name__`, each rebinding the previous one.  Judging by the
# references inside the tokenizer class below, these were originally
# logger / VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / SPIECE_UNDERLINE.
__magic_name__: Tuple = {"vocab_file": "spiece.model"}
# Checkpoint name -> URL of the shared SentencePiece model.
__magic_name__: int = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
# Maximum model input sizes, keyed by checkpoint name.
__magic_name__: Union[str, Any] = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
# SentencePiece's word-initial marker character.
__magic_name__: Dict = "▁"
class snake_case__ ( _SCREAMING_SNAKE_CASE ):
    """SentencePiece-backed ALBERT tokenizer.

    NOTE(review): heavily machine-mangled -- the base class
    ``_SCREAMING_SNAKE_CASE`` is undefined (this file imports
    PreTrainedTokenizer above), ``__init__`` declares every parameter as
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError) while
    bodies read ``lowerCAmelCase_`` and other free names, and assignments
    bind the local ``__magic_name__`` instead of attributes.  Restore from
    the upstream tokenization_albert.py before use.
    """
    lowercase__ : Any = VOCAB_FILES_NAMES
    lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __magic_name__ : Tuple = (
            AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ )
            if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
            else mask_token
        )
        __magic_name__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
        __magic_name__ : Optional[Any] = do_lower_case
        __magic_name__ : Optional[Any] = remove_space
        __magic_name__ : int = keep_accents
        __magic_name__ : Tuple = vocab_file
        __magic_name__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase_ )
    @property
    def __magic_name__ ( self ) -> Any:
        # vocab_size: number of pieces in the SentencePiece model.
        return len(self.sp_model )
    def __magic_name__ ( self ) -> str:
        # get_vocab: token -> id map, including added tokens.
        __magic_name__ : Optional[int] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Any:
        # Drop the (unpicklable) SentencePiece processor when pickling.
        __magic_name__ : Tuple = self.__dict__.copy()
        __magic_name__ : Optional[int] = None
        return state
    def __setstate__( self , lowerCAmelCase__ ) -> List[Any]:
        # Re-create the SentencePiece processor from the saved vocab file.
        __magic_name__ : Any = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __magic_name__ : Any = {}
        __magic_name__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
        # preprocess_text: whitespace/quote normalization, optional accent
        # stripping and lower-casing.
        if self.remove_space:
            __magic_name__ : Optional[int] = """ """.join(inputs.strip().split() )
        else:
            __magic_name__ : List[str] = inputs
        __magic_name__ : Optional[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            __magic_name__ : List[Any] = unicodedata.normalize("""NFKD""" , lowerCAmelCase_ )
            __magic_name__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
        if self.do_lower_case:
            __magic_name__ : List[str] = outputs.lower()
        return outputs
    def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
        # _tokenize: SentencePiece encode, with special handling so that
        # trailing "9," style digit/comma pieces are re-split.
        __magic_name__ : Optional[Any] = self.preprocess_text(lowerCAmelCase_ )
        __magic_name__ : Dict = self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
        __magic_name__ : Any = []
        for piece in pieces:
            if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                __magic_name__ : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        __magic_name__ : Any = cur_pieces[1:]
                    else:
                        __magic_name__ : Tuple = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(lowerCAmelCase_ )
            else:
                new_pieces.append(lowerCAmelCase_ )
        return new_pieces
    def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
        # _convert_token_to_id via the SentencePiece model.
        return self.sp_model.PieceToId(lowerCAmelCase_ )
    def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
        # _convert_id_to_token via the SentencePiece model.
        return self.sp_model.IdToPiece(lowerCAmelCase_ )
    def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
        # convert_tokens_to_string: decode runs of ordinary pieces, keeping
        # special tokens verbatim with a separating space.
        __magic_name__ : str = []
        __magic_name__ : Union[str, Any] = """"""
        __magic_name__ : Union[str, Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
                __magic_name__ : str = True
                __magic_name__ : Tuple = []
            else:
                current_sub_tokens.append(lowerCAmelCase_ )
                __magic_name__ : Union[str, Any] = False
        out_string += self.sp_model.decode(lowerCAmelCase_ )
        return out_string.strip()
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Optional[Any]:
        # build_inputs_with_special_tokens:
        #   single:  [CLS] A [SEP]         pair:  [CLS] A [SEP] B [SEP]
        __magic_name__ : Optional[Any] = [self.sep_token_id]
        __magic_name__ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> Union[str, Any]:
        # get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
        if token_ids_a is not None:
            return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
        return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Union[str, Any]:
        # create_token_type_ids_from_sequences: 0s for segment A, 1s for B.
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> str:
        # save_vocabulary: copy (or serialize) the SentencePiece model into
        # `save_directory`; returns early (None) if it is not a directory.
        if not os.path.isdir(lowerCAmelCase_ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __magic_name__ : Optional[Any] = os.path.join(
            lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase_ , """wb""" ) as fi:
                __magic_name__ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase_ )
        return (out_vocab_file,)
| 324 |
"""simple docstring"""
def __UpperCamelCase ( snake_case__ = 200 ):
A_ : Union[str, Any] = [1, 2, 5, 10, 20, 50, 100, 200]
A_ : int = [0] * (pence + 1)
A_ : Tuple = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(snake_case__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
| 180 | 0 |
import argparse
_snake_case = 'docs/source/_static/js/custom.js'  # path of the docs version switcher


def _A(__snake_case, path=None):
    """Write release version *__snake_case* into the docs ``custom.js``.

    Rewrites the ``const stableVersion = ...`` line and appends a new entry
    to the ``const versionMapping = {...}`` table.  The original opened the
    version string as a file and formatted an undefined ``version`` name;
    it also never stored the replaced line back into ``lines``.

    Args:
        __snake_case: release version string, e.g. ``"4.1.0"``.
        path: optional override of the custom.js location (defaults to the
            in-repo file; exists mainly so tests can use a scratch file).
    """
    version = __snake_case
    target = _snake_case if path is None else path
    with open(target, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end (before the closing brace)
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(target, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


# Backward-compatible public name; the CLI entry point calls update_custom_js.
update_custom_js = _A
if __name__ == "__main__":
    # CLI: python update_custom_js.py --version <release version>.
    # The original bound the parser/args to a throwaway name and then read
    # undefined `parser` / `args` / `update_custom_js` names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    _A(args.version)
| 712 |
def _A(__snake_case: int) -> bool:
    """Return True when *__snake_case* is a "bouncy" number, i.e. its digits
    are neither in non-decreasing nor in non-increasing order.

    The original validated with ``isinstance(x, x)`` (a TypeError for any
    int), sorted the int itself, and returned undefined names.

    Raises:
        ValueError: if the argument is not an int.
    """
    if not isinstance(__snake_case, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(__snake_case)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


# Importable, descriptive alias for the mangled public name.
check_bouncy = _A
def _A ( __snake_case :float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
while True:
if check_bouncy(__snake_case ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The original formatted `solution(99)`, a name not defined in this
    # file; the routine above is `_A`.
    print(f"""{_A(99)}""")
| 214 | 0 |
def __magic_name__(lowercase) -> int:
    """Convert a hexadecimal string to its binary digits, returned as an
    int of 0/1 digits (e.g. ``"AC"`` -> ``10101100``); a leading ``-`` is
    preserved in the result's sign.

    The original body read undefined free names (``hex_num``,
    ``is_negative``, ``int_num``, ``bin_str``) and crashed with
    ``int("")`` when the input was zero.

    Raises:
        ValueError: for an empty/blank string or non-hexadecimal characters.
    """
    hex_num = lowercase.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:
        # Input was zero ("0", "-0", "000", ...): the loop never ran and
        # int("") would raise, so return 0 explicitly.
        return 0
    return int(("-" + bin_str) if is_negative else bin_str)


# Importable, descriptive alias for the mangled public name.
hex_to_bin = __magic_name__
if __name__ == "__main__":
    import doctest

    # Removed stray dataset residue ("| 458 |") that had been fused onto
    # this line and made the module unparseable.
    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# NOTE(review): this rebinds the same mangled name `UpperCAmelCase_`,
# clobbering the logger above; upstream this map is the pretrained-config
# archive map (checkpoint name -> config.json URL).
UpperCAmelCase_ = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for the CANINE character-level encoder.

    The original declared every ``__init__`` parameter as ``snake_case__``
    (duplicate argument names are a SyntaxError), inherited from the
    undefined name ``lowerCamelCase__`` (this file imports
    ``PretrainedConfig``), and bound each hyper-parameter to a throwaway
    local instead of an instance attribute.  Parameter names below follow
    the attribute names the original body assigned, in the original default
    order.
    """

    # NOTE(review): presumably the `model_type` identifier; kept under its
    # original (mangled) attribute name to avoid widening the change.
    __a : str = """canine"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
"""simple docstring"""
import numpy as np
def _snake_case ( lowercase__ ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod() | 492 |
"""simple docstring"""
lowercase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowercase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowercase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowercase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowercase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowercase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowercase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowercase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 492 | 1 |
from __future__ import annotations
class A_ :
    """Substring search of `pattern` in `text` using the Boyer-Moore
    bad-character rule (every alignment is still scanned, so this is the
    pedagogical, not the skipping, variant).

    Reconstructed from a name-mangled original whose ``__init__`` declared
    two parameters with the same name (a SyntaxError) and whose three
    methods all shared one name; the restored method names come from the
    class's own call sites (``self.mismatch_in_text``,
    ``self.match_in_pattern``, ``bms.bad_character_heuristic``).
    """

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of *char* inside the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Rightmost mismatching text position for the window starting at
        *current_pos*, or -1 when the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every index at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            # -1 means the full pattern matched at alignment i.
            if self.mismatch_in_text(i) == -1:
                positions.append(i)
        return positions
if __name__ == "__main__":
    # Demo search.  The original rebound the class name `A_` to the demo
    # strings (destroying the class) and called an undefined
    # `BoyerMooreSearch`; it also ran unconditionally on import.
    text = 'ABAABA'
    pattern = 'AB'
    bms = A_(text, pattern)
    positions = bms.bad_character_heuristic()
    if len(positions) == 0:
        print('No match found')
    else:
        print('Pattern found in following positions: ')
        print(positions)
| 303 |
from math import ceil
def __a(SCREAMING_SNAKE_CASE: int = 1001) -> int:
    """Sum of the numbers on both diagonals of an n-by-n number spiral
    (Project Euler 28); *SCREAMING_SNAKE_CASE* is the (odd) side length.

    The original body read undefined free names (``n``, ``total``, ``odd``,
    ``even``).
    """
    total = 1  # the centre cell
    for i in range(1, int(ceil(SCREAMING_SNAKE_CASE / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # The four corners of ring i sum to 4*(2i+1)**2 - 12i.
        total = total + 4 * odd**2 - 6 * even
    return total


# Importable, descriptive alias; the CLI below calls `solution`.
solution = __a
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            # The original assigned the parsed argument to a throwaway name
            # and then formatted an undefined `n`.
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
| 303 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
# Module-level logger for this parquet builder (mangled public name;
# NOTE(review): the methods below log via a bare `logger`, not this name).
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class a__ ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    NOTE(review): all three fields were mangled to the same name
    ``__lowerCamelCase`` (each rebinding the previous one).  The builder
    below reads ``self.config.batch_size`` / ``.columns`` / ``.features``,
    which this class as written no longer provides -- restore the field
    names before use.
    """

    __lowerCamelCase = 10000
    __lowerCamelCase = None
    __lowerCamelCase = None
class a__ ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams record batches out of
    Parquet files.

    NOTE(review): machine-mangled -- `ParquetConfig` is undefined (the
    config class above was renamed `a__` and is shadowed by this very
    class), all four methods share the name `UpperCamelCase` (only the last
    survives), and bodies read free names (`dl_manager`, `data_files`,
    `files`, `schema`, `parquet_file`, `pa_table`, `logger`) where the
    parameter is `lowercase`.  Restore from the upstream
    `datasets/packaged_modules/parquet` before use.
    """

    __lowerCamelCase = ParquetConfig
    def UpperCamelCase ( self ) -> Optional[int]:
        '''Dataset metadata; features may be None and get inferred later.'''
        return datasets.DatasetInfo(features=self.config.features )
    def UpperCamelCase ( self , lowercase ) -> Union[str, Any]:
        '''Build SplitGenerators from config.data_files, inferring features
        from the first parquet schema when none were declared.'''
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        A__ = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(lowercase , (str, list, tuple) ):
            A__ = data_files
            if isinstance(lowercase , lowercase ):
                A__ = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            A__ = [dl_manager.iter_files(lowercase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        A__ = []
        for split_name, files in data_files.items():
            if isinstance(lowercase , lowercase ):
                A__ = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            A__ = [dl_manager.iter_files(lowercase ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(lowercase ):
                    with open(lowercase , "rb" ) as f:
                        A__ = datasets.Features.from_arrow_schema(pq.read_schema(lowercase ) )
                    break
            splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={"files": files} ) )
        return splits
    def UpperCamelCase ( self , lowercase ) -> pa.Table:
        '''Cast an Arrow table to the declared features' schema.'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ = table_cast(lowercase , self.info.features.arrow_schema )
        return pa_table
    def UpperCamelCase ( self , lowercase ) -> Optional[int]:
        '''Yield ("<file>_<batch>", pa.Table) pairs from each parquet file.'''
        A__ = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
            with open(lowercase , "rb" ) as f:
                A__ = pq.ParquetFile(lowercase )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        A__ = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'{file_idx}_{batch_idx}', self._cast_table(lowercase )
                except ValueError as e:
                    logger.error(F'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
                    raise
| 626 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a__ ( unittest.TestCase ):
    """Helper that holds DETA image-processor settings and computes expected resize shapes.

    NOTE(review): ``__init__`` declares all of its parameters under the single
    name ``lowercase`` — duplicate argument names are a ``SyntaxError`` in
    Python, and the body reads names (``parent``, ``batch_size``, ``size``, ...)
    that are never bound.  Likewise every local is mangled to ``A__`` and the
    batched branch calls ``self.get_expected_values``, which does not exist
    (the method is named ``UpperCamelCase``).  This class cannot run as written.
    """
    def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Union[str, Any]:
        '''simple docstring'''
        A__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        A__ = parent
        A__ = batch_size
        A__ = num_channels
        A__ = min_resolution
        A__ = max_resolution
        A__ = do_resize
        A__ = size
        A__ = do_normalize
        A__ = image_mean
        A__ = image_std
        A__ = do_rescale
        A__ = rescale_factor
        A__ = do_pad
    def UpperCamelCase ( self ) -> Dict:
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCamelCase ( self , lowercase , lowercase=False ) -> int:
        """Compute the (height, width) the processor is expected to resize inputs to."""
        if not batched:
            A__ = image_inputs[0]
            if isinstance(lowercase , Image.Image ):
                # PIL reports (width, height)
                A__ , A__ = image.size
            else:
                # assumes tensor layout is (C, H, W) — TODO confirm
                A__ , A__ = image.shape[1], image.shape[2]
            # Aspect-ratio-preserving resize: shorter side -> size["shortest_edge"].
            if w < h:
                A__ = int(self.size["shortest_edge"] * h / w )
                A__ = self.size["shortest_edge"]
            elif w > h:
                A__ = self.size["shortest_edge"]
                A__ = int(self.size["shortest_edge"] * w / h )
            else:
                A__ = self.size["shortest_edge"]
                A__ = self.size["shortest_edge"]
        else:
            # Batched: pad to the per-batch maxima of the per-image expectations.
            A__ = []
            for image in image_inputs:
                A__ , A__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            A__ = max(lowercase , key=lambda lowercase : item[0] )[0]
            A__ = max(lowercase , key=lambda lowercase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a__ ( snake_case , unittest.TestCase ):
    """Unit tests for DetaImageProcessor (properties, PIL/numpy/torch inputs, COCO annotations).

    NOTE(review): the base ``snake_case`` and the helper ``DetaImageProcessingTester``
    are not defined under those names in this file (the tester class above is
    named ``a__``); every method is named ``UpperCamelCase`` so only the last
    binding survives, and locals are uniformly mangled to ``A__`` while later
    statements read the original names (``image_processor``, ``image_processing``,
    ``encoded_images``, ``encoding``, ...).  This class cannot run as written.
    """
    __lowerCamelCase = DetaImageProcessor if is_vision_available() else None
    def UpperCamelCase ( self ) -> Optional[int]:
        '''simple docstring'''
        A__ = DetaImageProcessingTester(self )
    @property
    def UpperCamelCase ( self ) -> Any:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Check that the processor exposes the expected configuration attributes."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase , "image_mean" ) )
        self.assertTrue(hasattr(lowercase , "image_std" ) )
        self.assertTrue(hasattr(lowercase , "do_normalize" ) )
        self.assertTrue(hasattr(lowercase , "do_resize" ) )
        self.assertTrue(hasattr(lowercase , "do_rescale" ) )
        self.assertTrue(hasattr(lowercase , "do_pad" ) )
        self.assertTrue(hasattr(lowercase , "size" ) )
    def UpperCamelCase ( self ) -> int:
        '''simple docstring'''
        A__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , lowercase )
    def UpperCamelCase ( self ) -> int:
        '''simple docstring'''
        pass
    def UpperCamelCase ( self ) -> Any:
        """Round-trip random PIL images through the processor, unbatched and batched."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , Image.Image )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
        A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCamelCase ( self ) -> Optional[Any]:
        """Same round-trip as above, but with numpy-array inputs."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , np.ndarray )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCamelCase ( self ) -> List[Any]:
        """Same round-trip as above, but with torch-tensor inputs."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , torch.Tensor )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
        A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def UpperCamelCase ( self ) -> Tuple:
        """Integration test: encode a real COCO image + detection annotations and check outputs."""
        A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            A__ = json.loads(f.read() )
        A__ = {"image_id": 39769, "annotations": target}
        # encode them
        A__ = DetaImageProcessor()
        A__ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" )
        # verify pixel values
        A__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowercase )
        A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
        # verify area
        A__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
        # verify boxes
        A__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
        A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
        # verify image_id
        A__ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
        # verify is_crowd
        A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
        # verify class_labels
        A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
        # verify orig_size
        A__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
        # verify size
        A__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
    @slow
    def UpperCamelCase ( self ) -> Tuple:
        """Integration test: encode a real COCO image + panoptic annotations and check outputs."""
        A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            A__ = json.loads(f.read() )
        A__ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        A__ = DetaImageProcessor(format="coco_panoptic" )
        A__ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" )
        # verify pixel values
        A__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowercase )
        A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
        # verify area
        A__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
        # verify boxes
        A__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
        A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
        # verify image_id
        A__ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
        # verify is_crowd
        A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
        # verify class_labels
        A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
        # verify masks
        A__ = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase )
        # verify orig_size
        A__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
        # verify size
        A__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
| 626 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuDNN ops deterministic so the hard-coded slice checks below are reproducible.
enable_full_determinism()
class A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for StableDiffusionInstructPix2PixPipeline.

    NOTE(review): this file was mechanically renamed — every local is
    ``UpperCAmelCase_`` (each assignment clobbers the previous one), several
    methods pass ``_lowerCAmelCase`` without declaring it, one signature
    declares ``_lowerCAmelCase`` twice (a ``SyntaxError``), and later statements
    read names (``unet``, ``scheduler``, ``sd_pipe``, ``image_slice``, ...)
    that are never bound.  The class cannot run as written.
    """
    _snake_case =StableDiffusionInstructPixaPixPipeline
    _snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    _snake_case =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    _snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
    _snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
    def lowerCAmelCase__ ( self: Tuple ) -> int:
        """Build the tiny UNet/VAE/CLIP components used by every fast test."""
        torch.manual_seed(0 )
        UpperCAmelCase_ =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        UpperCAmelCase_ =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
        torch.manual_seed(0 )
        UpperCAmelCase_ =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        UpperCAmelCase_ =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ =CLIPTextModel(_lowerCAmelCase )
        UpperCAmelCase_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCAmelCase_ ={
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    # NOTE(review): duplicate parameter name `_lowerCAmelCase` below is a SyntaxError.
    def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Tuple=0 ) -> Optional[int]:
        """Build the standard kwargs dict (prompt, image, generator, ...) for a pipeline call."""
        UpperCAmelCase_ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
        UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" )
        if str(_lowerCAmelCase ).startswith("mps" ):
            UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
        else:
            UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
        UpperCAmelCase_ ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def lowerCAmelCase__ ( self: Optional[int] ) -> str:
        """Default pipeline call: check output shape and a reference slice."""
        UpperCAmelCase_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ =self.get_dummy_components()
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
        """Pipeline call with a negative prompt: check output shape and a reference slice."""
        UpperCAmelCase_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ =self.get_dummy_components()
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
        UpperCAmelCase_ ="french fries"
        UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
        UpperCAmelCase_ =output.images
        UpperCAmelCase_ =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
        """Batched (prompt x2, image x2) pipeline call: check shape and a reference slice."""
        UpperCAmelCase_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ =self.get_dummy_components()
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
        UpperCAmelCase_ =[inputs["prompt"]] * 2
        UpperCAmelCase_ =np.array(inputs["image"] ).astype(np.floataa ) / 2_55.0
        UpperCAmelCase_ =torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
        UpperCAmelCase_ =image / 2 + 0.5
        UpperCAmelCase_ =image.permute(0 , 3 , 1 , 2 )
        UpperCAmelCase_ =image.repeat(2 , 1 , 1 , 1 )
        UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        UpperCAmelCase_ =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
        """Euler-ancestral scheduler variant: check shape and a reference slice."""
        UpperCAmelCase_ ="cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ =self.get_dummy_components()
        UpperCAmelCase_ =EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
        UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[0, -3:, -3:, -1]
        UpperCAmelCase_ =[round(_lowerCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
        # NOTE(review): the debug print below iterates the *builtin* `slice` type and
        # references the undefined `_lowerCAmelCase`; it raises at runtime — the
        # rounded list above was presumably meant (round(x, 4); str(x) over it).
        print(",".join([str(_lowerCAmelCase ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
        """Delegate to the mixin's batch-vs-single-consistency check with a looser tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def lowerCAmelCase__ ( self: str ) -> Dict:
        """Check that passing pre-encoded latents matches passing the raw image."""
        UpperCAmelCase_ =self.get_dummy_components()
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
        UpperCAmelCase_ =VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
        UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        UpperCAmelCase_ =pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" ) )[0]
        UpperCAmelCase_ =components["vae"]
        UpperCAmelCase_ =self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                UpperCAmelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase )[0]
        UpperCAmelCase_ =np.abs(out - out_latents_inputs ).max()
        self.assertLess(_lowerCAmelCase , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """Slow (GPU, full checkpoint) integration tests for InstructPix2Pix.

    NOTE(review): same mechanical-rename damage as the fast-test class above —
    all locals are ``UpperCAmelCase_``, methods read undefined names
    (``image``, ``pipe``, ``image_slice``, ``mem_bytes``), and the nested
    ``callback_fn`` declares three parameters all named ``_lowerCAmelCase``
    (a ``SyntaxError``).  The class cannot run as written.
    """
    def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Tuple=0 ) -> Tuple:
        """Build the standard real-image inputs dict for a full pipeline call."""
        UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
        UpperCAmelCase_ =load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
        UpperCAmelCase_ ={
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
        """Default (PNDM) scheduler: check output shape and a reference slice."""
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ =self.get_inputs()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
        """LMS scheduler variant: check output shape and a reference slice."""
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
        UpperCAmelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ =self.get_inputs()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Dict ) -> str:
        """DDIM scheduler variant: check output shape and a reference slice."""
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
        UpperCAmelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ =self.get_inputs()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
        UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
        """Check the per-step callback is invoked with the expected intermediate latents."""
        UpperCAmelCase_ =0
        # NOTE(review): duplicate parameter names below are a SyntaxError.
        def callback_fn(_lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor ) -> None:
            UpperCAmelCase_ =True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                UpperCAmelCase_ =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                UpperCAmelCase_ =latents[0, -3:, -3:, -1]
                UpperCAmelCase_ =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                UpperCAmelCase_ =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                UpperCAmelCase_ =latents[0, -3:, -3:, -1]
                UpperCAmelCase_ =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        UpperCAmelCase_ =False
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
        UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ =self.get_inputs()
        pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
        """Check sequential CPU offload keeps peak GPU memory under ~2.2 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
        UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_ =self.get_inputs()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase )
        UpperCAmelCase_ =torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def lowerCAmelCase__ ( self: Dict ) -> str:
        """Check non-multiple-of-32 input resolutions (504x504) produce the expected slice."""
        UpperCAmelCase_ =self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        UpperCAmelCase_ =inputs["image"].resize((504, 504) )
        UpperCAmelCase_ ="timbrooks/instruct-pix2pix"
        UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            _lowerCAmelCase , safety_checker=_lowerCAmelCase , )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ =pipe(**_lowerCAmelCase )
        UpperCAmelCase_ =output.images[0]
        UpperCAmelCase_ =image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        UpperCAmelCase_ =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 54 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowercase_ ( IterableDataset):
    """Minimal iterable-style dataset that yields the elements of its backing
    data in order.

    Used to exercise ``Accelerator`` handling of ``IterableDataset`` inputs,
    which carry no length / batch sampler.
    """
    def __init__( self , _UpperCAmelCase ):
        """Store the backing sequence/tensor that ``__iter__`` walks over."""
        # was: `a_ = data` — bound an undefined name to a throwaway local
        # instead of setting the attribute that `__iter__` reads; the base
        # class was also the undefined name `UpperCamelCase__` rather than
        # the `IterableDataset` imported at the top of this file.
        self.data = _UpperCAmelCase
    def __iter__( self ):
        """Yield each element of the stored data, in order."""
        for element in self.data:
            yield element
def lowerCamelCase_ ( UpperCAmelCase__=True ):
    """Create the ``Accelerator`` used by this two-process test script.

    Args:
        UpperCAmelCase__: forwarded to the ``Accelerator`` as ``even_batches``.

    Returns:
        The configured ``Accelerator`` (asserts exactly two processes).
    """
    # was: the Accelerator was bound to `a_` while the assert and the return
    # below used the undefined name `accelerator` — bind it under the name
    # that is actually read.
    accelerator = Accelerator(even_batches=UpperCAmelCase__ )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def lowerCamelCase_ ( accelerator , dataset_size , batch_size , iterable = False ):
    """Build a dataloader over ``range(dataset_size)`` and prepare it with ``accelerator``.

    Args:
        accelerator: the ``Accelerator`` whose ``prepare`` wraps the dataloader.
        dataset_size: number of integer samples in the dataset.
        batch_size: dataloader batch size.
        iterable: if True, wrap the data in the iterable-style dataset class
            defined above (``lowercase_``) instead of a map-style ``TensorDataset``.

    Returns:
        The accelerator-prepared ``DataLoader``.
    """
    # was: all four parameters were declared under the single name
    # `UpperCAmelCase__` (duplicate argument names are a SyntaxError), and the
    # iterable branch called the undefined `DummyIterableDataset` — the class
    # in this file is bound as `lowercase_`.
    if iterable:
        dataset = lowercase_(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def lowerCamelCase_ ( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """Assert that each process sees the expected sequence of batch sizes.

    Args:
        accelerator: the ``Accelerator`` driving the two-process run.
        dataset_size / batch_size: forwarded to the dataloader builder.
        process_0_expected_batch_sizes: expected per-batch sizes on rank 0.
        process_1_expected_batch_sizes: expected per-batch sizes on rank 1.

    NOTE(review): ``create_dataloader`` is not defined under that name in this
    file — every helper here was renamed to ``lowerCamelCase_`` — confirm the
    intended call target.
    """
    # was: all five parameters were declared under the single name
    # `UpperCAmelCase__`, which is a SyntaxError; the body then read the
    # original (unbound) names used below.
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def lowerCamelCase_ ( ):
    """Check the default (even_batches=True) dataloader padding behaviour.

    NOTE(review): the helpers called below (`create_accelerator`,
    `verify_dataloader_batch_sizes`) and the argument `UpperCAmelCase__` are
    not defined under those names in this file — every helper was renamed to
    `lowerCamelCase_` and the accelerator is bound to `a_`; this function
    raises NameError as written.
    """
    a_ = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def lowerCamelCase_ ( ):
    """Check batch sizes when even_batches is disabled (no padding).

    NOTE(review): same NameError issues as the function above —
    `create_accelerator` / `verify_dataloader_batch_sizes` / `UpperCAmelCase__`
    are undefined under those names in this file.
    """
    a_ = create_accelerator(even_batches=UpperCAmelCase__ )
    verify_dataloader_batch_sizes(
        UpperCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        UpperCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def lowerCamelCase_ ( ):
    """Train one pass over an uneven dataloader inside join_uneven_inputs and
    check how many batches each rank processed.

    NOTE(review): mechanical renaming broke this body — locals are bound to
    `a_` while later statements read the original names (`accelerator`, `model`,
    `ddp_model`, `dl`, `output`, `loss`, `batch_idxs`), and `create_dataloader` /
    `UpperCAmelCase__` are undefined; this function raises NameError as written.
    """
    a_ = create_accelerator(even_batches=UpperCAmelCase__ )
    a_ = torch.nn.Linear(1 , 1 )
    a_ = accelerator.prepare(UpperCAmelCase__ )
    a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
    a_ = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(UpperCAmelCase__ ):
            a_ = ddp_model(batch[0].float() )
            a_ = output.sum()
            loss.backward()
            batch_idxs.append(UpperCAmelCase__ )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def lowerCamelCase_ ( UpperCAmelCase__ ):
    """Assert that ``join_uneven_inputs`` emits a UserWarning when the
    distributed type does not support joining (non multi-GPU DDP).

    Args:
        UpperCAmelCase__: the accelerator whose ``join_uneven_inputs`` is probed.
    """
    # was: `record=` received the accelerator argument instead of True, the
    # inner `with` read the undefined name `accelerator`, and `issubclass`
    # compared the warning category against the accelerator argument rather
    # than UserWarning.
    with warnings.catch_warnings(record=True ) as w:
        with UpperCAmelCase__.join_uneven_inputs([Mock()] ):
            pass
    assert issubclass(w[-1].category , UserWarning )
    assert "only supported for multi-GPU" in str(w[-1].message )
def lowerCamelCase_ ( ):
    """Check that join_uneven_inputs(even_batches=...) temporarily overrides the
    dataloaders' even_batches flag and restores it afterwards.

    NOTE(review): mechanical renaming broke this body — all locals are bound to
    `a_` while later statements read the original names (`accelerator`, `model`,
    `train_dl`, `valid_dl`, `ddp_model`, `*_overridden_value`), and
    `create_dataloader` / `UpperCAmelCase__` are undefined; this function raises
    NameError as written.
    """
    a_ = True
    a_ = False
    a_ = create_accelerator(even_batches=UpperCAmelCase__ )
    a_ = torch.nn.Linear(1 , 1 )
    a_ = accelerator.prepare(UpperCAmelCase__ )
    a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
    a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
        a_ = train_dl.batch_sampler.even_batches
        a_ = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase_ ( ):
    """Check the even_batches override with a mix of iterable and map-style
    dataloaders: the iterable one must be skipped without AttributeError.

    NOTE(review): same mechanical-rename NameError issues as the functions
    above (`accelerator`, `batch_dl`, `create_dataloader`, `UpperCAmelCase__`
    are unbound/undefined); this function cannot run as written.
    """
    a_ = True
    a_ = False
    a_ = create_accelerator(even_batches=UpperCAmelCase__ )
    a_ = torch.nn.Linear(1 , 1 )
    a_ = accelerator.prepare(UpperCAmelCase__ )
    create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
    a_ = create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("""ignore""" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
                a_ = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowerCamelCase_ ( ):
    """Check that overriding even_batches with only an iterable dataloader
    prepared emits a 'only supported for map-style datasets' warning.

    NOTE(review): same mechanical-rename NameError issues as the functions
    above (`create_dataloader` and `UpperCAmelCase__` are undefined); this
    function cannot run as written.
    """
    a_ = create_accelerator()
    a_ = torch.nn.Linear(1 , 1 )
    a_ = accelerator.prepare(UpperCAmelCase__ )
    create_dataloader(UpperCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCAmelCase__ )
    with warnings.catch_warnings(record=UpperCAmelCase__ ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCAmelCase__ ):
            pass
    assert issubclass(w[-1].category , UpperCAmelCase__ )
    assert "only supported for map-style datasets" in str(w[-1].message )
def main():
    """Run the full even_batches / join_uneven_inputs test suite under the
    current distributed configuration, printing progress on the main process."""
    accelerator = create_accelerator()
    accelerator.print("""Test that even_batches variable ensures uniform batches across processes""")
    test_default_ensures_even_batch_sizes()
    accelerator.print("""Run tests with even_batches disabled""")
    test_can_disable_even_batches()
    accelerator.print("""Test joining uneven inputs""")
    test_can_join_uneven_inputs()
    accelerator.print("""Test overriding even_batches when joining uneven inputs""")
    test_join_can_override_even_batches()
    accelerator.print("""Test overriding even_batches for mixed dataloader types""")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("""Test join with non DDP distributed raises warning""")
    # Temporarily pretend we are under FSDP to exercise the non-DDP warning path,
    # then restore the real distributed type so later state is not corrupted.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
# Script entry point: run the distributed test suite.
if __name__ == "__main__":
    main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json URL for the pretrained DPR models.
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _A(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Mirrors a BERT-style configuration plus ``projection_dim`` for an optional
    projection on top of the encoder's pooled output (0 disables it).
    """

    model_type = """dpr"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="""absolute""",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq parameter-name prefix -> HF parameter-name prefix, grouped by sub-module.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task composite mappings: shared encoder/decoder plus the task-specific pre/post-nets.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys that have no HF counterpart and are silently skipped.
# Entries ending in ".*" match by prefix (see should_ignore).
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` into the HF model at the dotted attribute path `key`.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: tensor to copy into the target parameter/buffer.
        full_name: original fairseq name, used only for error/log messages.
        weight_type: which attribute of the target to set ("weight",
            "weight_g", "weight_v", "bias", "running_mean", "running_var",
            "num_batches_tracked") or None to set the pointer's data directly.

    Raises:
        ValueError: if the target shape does not match `value.shape`.
    """
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if the fairseq parameter `name` matches any ignore pattern.

    Patterns ending in ".*" match by prefix; patterns containing ".*."
    match when both the prefix and suffix occur in the name; any other
    pattern matches by substring.
    """
    for key in ignore_keys:
        if key.endswith(""".*"""):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every fairseq weight into the matching HF module for `task`.

    Args:
        fairseq_dict: fairseq state dict (name -> tensor).
        hf_model: target SpeechTa model instance.
        task: one of "s2t", "t2s", "s2s" — selects the name mapping and
            ignore list.

    Raises:
        ValueError: for an unsupported task name.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        mapping = MAPPING_S2T
        ignore_keys = IGNORE_KEYS_S2T
    elif task == "t2s":
        # Text encoder has no convolutional feature encoder.
        feature_encoder = None
        mapping = MAPPING_T2S
        ignore_keys = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        mapping = MAPPING_S2S
        ignore_keys = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")
    for name, value in fairseq_dict.items():
        if should_ignore(name, ignore_keys):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            # Conv feature-extractor weights use their own loader.
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in mapping.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(""".*.""")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor weight into the HF extractor.

    Args:
        full_name: fairseq name containing "conv_layers.<layer_id>.<type_id>...".
        value: tensor to copy.
        feature_extractor: HF feature extractor holding `conv_layers`.
        unused_weights: list mutated in place with names that matched nothing.
        use_group_norm: True when the HF config uses group normalization
            (only layer 0 then carries a layer_norm).

    Raises:
        ValueError: on a shape mismatch with the target parameter.
    """
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # type 0 -> the convolution itself.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 -> the normalization layer (only present where the config puts one).
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and save it.

    Args:
        task: "s2t", "t2s", or "s2s" — selects the model class and config tweaks.
        checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for model and processor.
        config_path: optional path to an existing HF config.json.
        vocab_path: optional path to the SentencePiece model for the tokenizer.
        repo_id: optional hub repo to push the converted model to.

    Raises:
        ValueError: for an unknown task name.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1_8_7_6
        config.max_text_positions = 6_0_0
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1_8_7_6
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token})
        tokenizer.add_tokens(["""<ctc_blank>"""])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["""model"""], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
# Command-line entry point: parse arguments and run the conversion.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
# NOTE(review): the three lines below are dataset-viewer footer residue, not code;
# commented out so they are inert. Safe to delete.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.