# Longformer model configuration and ONNX export configuration (transformers).
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by
        # inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
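A minimal export sketch for the config above. It assumes a transformers version that still ships the `transformers.onnx` exporter and that `LongformerOnnxConfig` is importable from the module above; the checkpoint and output path are illustrative.

from pathlib import Path

from transformers import AutoTokenizer, LongformerModel
from transformers.models.longformer.configuration_longformer import LongformerOnnxConfig
from transformers.onnx import export

checkpoint = "allenai/longformer-base-4096"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = LongformerModel.from_pretrained(checkpoint)

onnx_config = LongformerOnnxConfig(model.config)
# default_onnx_opset is at least 14 here, which the tril operator requires
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("longformer.onnx"))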
# DPMSolverSDEScheduler tests (diffusers).
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
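For context, a sketch of how this scheduler is typically swapped into an existing pipeline via `from_config`; the checkpoint id and prompt are illustrative.

from diffusers import DiffusionPipeline, DPMSolverSDEScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# rebuild the scheduler from the pipeline's existing scheduler config
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]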
# Community "stable diffusion comparison" pipeline (diffusers): runs the same prompt
# through Stable Diffusion v1.1-v1.4 and returns one image per checkpoint.
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Gather all result images into a single list and return them via
        # StableDiffusionPipelineOutput
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
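A usage sketch: community pipelines load through the `custom_pipeline` argument of `DiffusionPipeline.from_pretrained`. The custom-pipeline name below is an assumption about how this file is registered in the diffusers community folder; the prompt is illustrative.

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"  # assumed registration name
)
output = pipe("a photo of an astronaut riding a horse")
images = output.images  # one image per checkpoint, v1.1 through v1.4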
# Common tester mixin for UNet down/mid/up blocks (diffusers tests).
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
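Concrete test classes mix this tester into `unittest.TestCase` and pin the block under test. The subclass below is a sketch: `DownBlock2D` is a real diffusers block, but the expected-slice values are placeholders to be filled with recorded reference outputs.

from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [...]  # nine reference values recorded once for this block
        super().test_output(expected_slice)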
# PLBart tokenizer tests (transformers).
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # fmt: off
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771,
        39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
# Position of the highest set bit (TheAlgorithms-style bit manipulation).
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the position of the highest set bit of a number,
    counted from the least significant bit starting at 1.
    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(37)
    6
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(4)
    3
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
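For non-negative integers this is exactly Python's built-in `int.bit_length()`, which gives a quick cross-check:

assert get_highest_set_bit_position(25) == (25).bit_length() == 5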
# Convert BiT checkpoints from timm to the transformers BiT implementation.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
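Assuming the script above is saved as `convert_bit_to_pytorch.py` (the filename is an assumption), a typical invocation converts the default timm checkpoint and writes it locally; the output path is illustrative:

    python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-50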
# Convert a fairseq Wav2Vec2 encoder + Speech2Text2 decoder checkpoint into a
# transformers SpeechEncoderDecoderModel.
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
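Assuming the script above is saved as `convert_wav2vec2_seq2seq_original_to_pytorch.py` and a local fairseq checkpoint plus its dict file are available (all paths below are illustrative), a typical run would be:

    python convert_wav2vec2_seq2seq_original_to_pytorch.py \
        --checkpoint_path ./checkpoint_best.pt \
        --dict_path ./dict.en.txt \
        --pytorch_dump_folder_path ./s2t-wav2vec2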
# X-CLIP lazy import structure (transformers).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
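Because of the `_LazyModule` registration above, attributes resolve on first access, so importing the package stays cheap until a class is actually used. A small sketch (the checkpoint id is illustrative):

from transformers import XCLIPProcessor  # resolved lazily; no torch-backed modeling import yet

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")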
# TensorFlow DeBERTa-v2 model tests (transformers).
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 _lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json") class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = 0 def UpperCamelCase_ ( self : int ): self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : int ): with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. _UpperCamelCase = os.path.join(_A , '''fake-roberta''' ) os.makedirs(_A , exist_ok=_A ) with open(os.path.join(_A , '''config.json''' ) , '''w''' ) as f: f.write(json.dumps({} ) ) _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertEqual(type(_A ) , _A ) def UpperCamelCase_ ( self : Any ): try: AutoConfig.register('''custom''' , _A ) # Wrong model type will raise an error with self.assertRaises(_A ): AutoConfig.register('''model''' , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoConfig.register('''bert''' , _A ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCamelCase = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A ) _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def UpperCamelCase_ ( self : Dict ): with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): _UpperCamelCase = AutoConfig.from_pretrained('''bert-base''' ) def UpperCamelCase_ ( self : Optional[int] ): with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _UpperCamelCase = AutoConfig.from_pretrained(_A , revision='''aaaaaa''' ) def UpperCamelCase_ ( self : Any ): with self.assertRaisesRegex( _A , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def UpperCamelCase_ ( self : Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. 
with self.assertRaises(_A ): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A ) _UpperCamelCase = AutoConfig.from_pretrained(_A , trust_remote_code=_A ) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' ) def UpperCamelCase_ ( self : List[Any] ): class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "new-model" try: AutoConfig.register('''new-model''' , _A ) # If remote code is not set, the default is to use local _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub _UpperCamelCase = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
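The registration flow exercised in the row above boils down to a few lines. A minimal sketch, assuming a PretrainedConfig subclass; the CustomConfig name and the "custom" model type are illustrative, not fixed API:

from transformers import AutoConfig, PretrainedConfig

class CustomConfig(PretrainedConfig):
    model_type = "custom"

# Register the config under its model type, then resolve it through the auto-API.
AutoConfig.register("custom", CustomConfig)
config = AutoConfig.for_model("custom")
assert isinstance(config, CustomConfig)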
714
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): # Return True if there is a node that has not been visited yet. _UpperCamelCase = [False] * len(__snake_case ) _UpperCamelCase = [] queue.append(__snake_case ) _UpperCamelCase = True while queue: _UpperCamelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__snake_case ) _UpperCamelCase = True _UpperCamelCase = u return visited[t] def _snake_case ( __snake_case , __snake_case , __snake_case ): # This array is filled by BFS to store the augmenting path _UpperCamelCase = [-1] * (len(__snake_case )) _UpperCamelCase = 0 while bfs(__snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = float('''Inf''' ) _UpperCamelCase = sink while s != source: # Find the minimum residual capacity along the selected path _UpperCamelCase = min(__snake_case , graph[parent[s]][s] ) _UpperCamelCase = parent[s] max_flow += path_flow _UpperCamelCase = sink while v != source: _UpperCamelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase = parent[v] return max_flow _lowerCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowerCAmelCase, _lowerCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
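For reference, the same Edmonds-Karp routine (Ford-Fulkerson with BFS path selection) reads as follows once the names are spelled out. A sketch with illustrative names; it works on a copy so the capacity matrix is preserved:

from collections import deque

def max_flow(graph, source, sink):
    n = len(graph)
    residual = [row[:] for row in graph]  # residual capacities; the input stays intact
    flow = 0
    while True:
        parent = [-1] * n  # BFS tree for the next augmenting path
        parent[source] = source
        queue = deque([source])
        while queue:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: done
            return flow
        path_flow, v = float("inf"), sink
        while v != source:  # bottleneck capacity along the path
            path_flow = min(path_flow, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # augment: forward edges shrink, reverse edges grow
            u = parent[v]
            residual[u][v] -= path_flow
            residual[v][u] += path_flow
            v = u
        flow += path_flow

# For the 6x6 capacity matrix in the row above, max_flow(graph, 0, 5) returns 23.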
71
0
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def _snake_case ( __snake_case , __snake_case , __snake_case ): _UpperCamelCase = 1.5 _UpperCamelCase = int(factor * num_class_images ) _UpperCamelCase = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__snake_case , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=__snake_case ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: _UpperCamelCase = client.query(text=__snake_case ) if len(__snake_case ) >= factor * num_class_images or num_images > 1E4: break else: _UpperCamelCase = int(factor * num_images ) _UpperCamelCase = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__snake_case , aesthetic_weight=0.1 , ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = tqdm(desc='''downloading real regularization images''' , total=__snake_case ) with open(f"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(f"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open( f"""{class_data_dir}/images.txt""" , '''w''' ) as fa: while total < num_class_images: _UpperCamelCase = class_images[count] count += 1 try: _UpperCamelCase = requests.get(images['''url'''] ) if img.status_code == 200: _UpperCamelCase = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def _snake_case ( ): _UpperCamelCase = argparse.ArgumentParser('''''' , add_help=__snake_case ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__snake_case , type=__snake_case ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__snake_case , type=__snake_case ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=__snake_case ) return parser.parse_args() if __name__ == "__main__": _lowerCAmelCase = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
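The row above follows the Transformers lazy-import pattern: the public names live in an import structure, and the module object is swapped for a _LazyModule so the heavy submodule imports only run on first attribute access. A minimal sketch of the pattern, with an illustrative module and symbol:

import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_unispeech": ["UniSpeechConfig"]}
# Attribute access on this module now triggers the real import lazily.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)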
71
0
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _lowerCAmelCase = 2 class lowerCAmelCase_ : def __init__( self : Optional[Any] , *, # begin keyword-only arguments _A : Optional[Any]="<s>" , _A : Union[str, Any]="<pad>" , _A : Dict="</s>" , _A : Optional[Any]="<unk>" , _A : int=None , ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = bos, unk, pad, eos _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = {} _UpperCamelCase = self.add_symbol(_A ) _UpperCamelCase = self.add_symbol(_A ) _UpperCamelCase = self.add_symbol(_A ) _UpperCamelCase = self.add_symbol(_A ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_A ) _UpperCamelCase = len(self.symbols ) def __eq__( self : Optional[Any] , _A : int ): return self.indices == other.indices def __getitem__( self : Tuple , _A : Tuple ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : List[Any] ): return len(self.symbols ) def __contains__( self : Union[str, Any] , _A : Optional[Any] ): return sym in self.indices @classmethod def UpperCamelCase_ ( cls : List[str] , _A : Any ): _UpperCamelCase = cls() d.add_from_file(_A ) return d def UpperCamelCase_ ( self : List[Any] , _A : int , _A : Union[str, Any]=1 , _A : Union[str, Any]=False ): if word in self.indices and not overwrite: _UpperCamelCase = self.indices[word] _UpperCamelCase = self.count[idx] + n return idx else: _UpperCamelCase = len(self.symbols ) _UpperCamelCase = idx self.symbols.append(_A ) self.count.append(_A ) return idx def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): return 0 def UpperCamelCase_ ( self : Tuple , _A : Dict ): if isinstance(_A , _A ): try: with open(_A , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(_A ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(_A ) ) return _UpperCamelCase = f.readlines() _UpperCamelCase = self._load_meta(_A ) for line in lines[indices_start_line:]: try: _UpperCamelCase , _UpperCamelCase = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": _UpperCamelCase = True _UpperCamelCase , _UpperCamelCase = line.rsplit(''' ''' , 1 ) else: _UpperCamelCase = False _UpperCamelCase = int(_A ) _UpperCamelCase = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. 
If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(_A ) ) self.add_symbol(_A , n=_A , overwrite=_A ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def _snake_case ( __snake_case ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCamelCase = dict((re.sub(R'''@@$''' , '''''' , __snake_case ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , __snake_case ), v) for k, v in d.items() ) _UpperCamelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] _UpperCamelCase = d[k] # restore return da def _snake_case ( __snake_case , __snake_case ): # prep if not os.path.exists(__snake_case ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(__snake_case , exist_ok=__snake_case ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models _UpperCamelCase = os.path.join(__snake_case , '''checkpoint.pt''' ) if not os.path.isfile(__snake_case ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) _UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' ) _UpperCamelCase = chkpt['''cfg''']['''model'''] # dicts _UpperCamelCase = os.path.join(__snake_case , '''dict.txt''' ) if not os.path.isfile(__snake_case ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) _UpperCamelCase = Dictionary.load(__snake_case ) _UpperCamelCase = rewrite_dict_keys(src_dict.indices ) _UpperCamelCase = len(__snake_case ) _UpperCamelCase = os.path.join(__snake_case , VOCAB_FILES_NAMES['''vocab_file'''] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # merges_file (bpecodes) _UpperCamelCase = os.path.join(__snake_case , '''bpecodes''' ) if not os.path.isfile(__snake_case ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) _UpperCamelCase = os.path.join(__snake_case , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(__snake_case , __snake_case ) # model config _UpperCamelCase = os.path.join(__snake_case , '''config.json''' ) _UpperCamelCase = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1E-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(f"""Generating {biogpt_model_config_file}""" ) with open(__snake_case , '''w''' , 
encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # tokenizer config _UpperCamelCase = os.path.join(__snake_case , __snake_case ) _UpperCamelCase = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # model _UpperCamelCase = chkpt['''model'''] # remove unneeded keys _UpperCamelCase = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(__snake_case , __snake_case ) _UpperCamelCase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): _UpperCamelCase = model_state_dict.pop(__snake_case ) else: _UpperCamelCase = model_state_dict.pop(__snake_case ) _UpperCamelCase = BioGptConfig.from_pretrained(__snake_case ) _UpperCamelCase = BioGptForCausalLM(__snake_case ) # check that it loads ok model_new.load_state_dict(__snake_case ) # save _UpperCamelCase = os.path.join(__snake_case , __snake_case ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(__snake_case , __snake_case ) print('''Conversion is done!''' ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
716
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "new-model" if is_tf_available(): class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = NewModelConfig @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = '''bert-base-cased''' _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModel.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '''bert-base-cased''' _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForPreTraining.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : Optional[Any] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained(_A ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : List[str] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = 
TFAutoModelForMaskedLM.from_pretrained(_A ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : Any ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def UpperCamelCase_ ( self : Any ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForQuestionAnswering.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow @require_tensorflow_probability def UpperCamelCase_ ( self : List[str] ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _UpperCamelCase = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) _UpperCamelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(_A ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 1_4410 ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 1_4410 ) def UpperCamelCase_ ( self : Optional[int] ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _UpperCamelCase = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_A , _A ) _UpperCamelCase = copy.deepcopy(model.config ) _UpperCamelCase = ['''FunnelBaseModel'''] _UpperCamelCase = TFAutoModel.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) _UpperCamelCase = TFAutoModel.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def UpperCamelCase_ ( self : Tuple ): try: AutoConfig.register('''new-model''' , _A ) _UpperCamelCase = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) auto_class.register(_A , _A ) # Trying to register something existing in the 
Transformers library will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCamelCase = BertModelTester(self ).get_config() _UpperCamelCase = NewModelConfig(**tiny_config.to_dict() ) _UpperCamelCase = auto_class.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) _UpperCamelCase = auto_class.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def UpperCamelCase_ ( self : Any ): with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): _UpperCamelCase = TFAutoModel.from_pretrained('''bert-base''' ) def UpperCamelCase_ ( self : Optional[Any] ): with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _UpperCamelCase = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' ) def UpperCamelCase_ ( self : str ): with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def UpperCamelCase_ ( self : Optional[int] ): with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ): _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def UpperCamelCase_ ( self : Dict ): # Make sure we have cached the model. _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _UpperCamelCase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: _UpperCamelCase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
717
def _snake_case ( __snake_case , __snake_case , __snake_case ): if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod else: _UpperCamelCase = binary_exponentiation(__snake_case , n / 2 , __snake_case ) return (b * b) % mod # a prime number _lowerCAmelCase = 701 _lowerCAmelCase = 1_000_000_000 _lowerCAmelCase = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
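The two prints above rely on Fermat's little theorem: for a prime p and b not divisible by p, b**(p-2) mod p is the modular inverse of b, so division mod p reduces to the exponentiation routine. A small self-contained check, using an iterative square-and-multiply variant with illustrative names:

def binary_exponentiation(a, n, mod):
    result = 1
    a %= mod
    while n > 0:
        if n & 1:  # multiply in the current square when the bit is set
            result = result * a % mod
        a = a * a % mod
        n >>= 1
    return result

p, b = 701, 10
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1  # b * b**(p-2) is congruent to 1 (mod p)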
71
0
def _snake_case ( __snake_case , __snake_case ): return abs(__snake_case ) if a == 0 else greatest_common_divisor(b % a , __snake_case ) def _snake_case ( __snake_case , __snake_case ): while y: # when y reaches 0 the loop terminates and x is returned as the final GCD _UpperCamelCase , _UpperCamelCase = y, x % y return abs(__snake_case ) def _snake_case ( ): try: _UpperCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) _UpperCamelCase = int(nums[0] ) _UpperCamelCase = int(nums[1] ) print( f"""greatest_common_divisor({num_a}, {num_a}) = """ f"""{greatest_common_divisor(__snake_case , __snake_case )}""" ) print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__snake_case , __snake_case )}""" ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
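Both variants above compute the same value; a quick sanity check with readable names (a minimal sketch, not the row's exact code):

def greatest_common_divisor(a, b):
    # recursive Euclid: gcd(a, b) = gcd(b mod a, a), and gcd(0, b) = |b|
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)

def gcd_by_iterative(x, y):
    while y:  # when y reaches 0, x holds the GCD
        x, y = y, x % y
    return abs(x)

assert greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40) == 8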
718
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
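All of the designs above follow the Robert Bristow-Johnson Audio EQ Cookbook: compute the angular frequency w0 from the corner frequency, alpha from the Q factor, then fill a biquad. A sketch of the low-pass case that returns normalised coefficients instead of an IIRFilter object, so it runs without the audio_filters package; names are illustrative:

from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b1 = 1 - cos(w0)   # cookbook low-pass numerator
    b0 = b2 = b1 / 2
    a0 = 1 + alpha     # denominator before normalisation
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    # Normalise by a0 so the filter can be applied as a direct-form difference equation.
    return [b0 / a0, b1 / a0, b2 / a0], [1.0, a1 / a0, a2 / a0]

b_coeffs, a_coeffs = lowpass_coefficients(1_000, 48_000)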
71
0
from __future__ import annotations def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase , _UpperCamelCase = position _UpperCamelCase = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _UpperCamelCase = [] for position in positions: _UpperCamelCase , _UpperCamelCase = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(__snake_case ) return permissible_positions def _snake_case ( __snake_case ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( __snake_case , __snake_case , __snake_case ): if is_complete(__snake_case ): return True for position in get_valid_pos(__snake_case , len(__snake_case ) ): _UpperCamelCase , _UpperCamelCase = position if board[y][x] == 0: _UpperCamelCase = curr + 1 if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ): return True _UpperCamelCase = 0 return False def _snake_case ( __snake_case ): _UpperCamelCase = [[0 for i in range(__snake_case )] for j in range(__snake_case )] for i in range(__snake_case ): for j in range(__snake_case ): _UpperCamelCase = 1 if open_knight_tour_helper(__snake_case , (i, j) , 1 ): return board _UpperCamelCase = 0 _UpperCamelCase = f"""Open Knight Tour cannot be performed on a board of size {n}""" raise ValueError(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
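The first helper above enumerates the eight knight offsets and keeps those that stay on the board; a readable sketch with illustrative names:

def knight_moves(position, n):
    y, x = position
    candidates = [
        (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2),
        (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1),
    ]
    # keep only moves that land inside the n-by-n board
    return [(a, b) for a, b in candidates if 0 <= a < n and 0 <= b < n]

assert sorted(knight_moves((0, 0), 5)) == [(1, 2), (2, 1)]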
719
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
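The validator above accepts only a dict with a "type" of "linear" or "dynamic" and a float "factor" strictly greater than 1. A sketch of how that behaves through the public config class (named GPTNeoXConfig in Transformers; all other hyperparameters keep their defaults):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted

try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})  # rejected: factor must be > 1
except ValueError as err:
    print(err)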
71
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "altclip_text_model" def __init__( self : Optional[Any] , _A : List[str]=25_0002 , _A : Optional[int]=1024 , _A : str=24 , _A : List[Any]=16 , _A : int=4096 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : List[Any]=514 , _A : Any=1 , _A : List[str]=0.02 , _A : List[str]=0.02 , _A : Optional[int]=1e-05 , _A : Dict=1 , _A : Dict=0 , _A : Union[str, Any]=2 , _A : str="absolute" , _A : Optional[Any]=True , _A : Union[str, Any]=768 , **_A : str , ): super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = hidden_act _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = initializer_range _UpperCamelCase = initializer_factor _UpperCamelCase = layer_norm_eps _UpperCamelCase = position_embedding_type _UpperCamelCase = use_cache _UpperCamelCase = project_dim class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "altclip_vision_model" def __init__( self : str , _A : Optional[Any]=768 , _A : int=3072 , _A : Optional[int]=512 , _A : Optional[int]=12 , _A : List[str]=12 , _A : Any=3 , _A : Optional[Any]=224 , _A : str=32 , _A : Dict="quick_gelu" , _A : List[str]=1e-5 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Union[str, Any]=1.0 , **_A : List[Any] , ): super().__init__(**_A ) _UpperCamelCase = hidden_size _UpperCamelCase = intermediate_size _UpperCamelCase = projection_dim _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = num_channels _UpperCamelCase = patch_size _UpperCamelCase = image_size _UpperCamelCase = initializer_range _UpperCamelCase = initializer_factor _UpperCamelCase = attention_dropout _UpperCamelCase = layer_norm_eps _UpperCamelCase = hidden_act @classmethod def UpperCamelCase_ ( cls : List[str] , _A : Union[str, os.PathLike] , **_A : Tuple ): cls._set_token_in_kwargs(_A ) _UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": _UpperCamelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "altclip" UpperCAmelCase = True def __init__( self : List[str] , _A : Tuple=None , _A : Tuple=None , _A : Union[str, Any]=768 , _A : Optional[int]=2.6592 , **_A : Tuple ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). 
_UpperCamelCase = kwargs.pop('''text_config_dict''' , _A ) _UpperCamelCase = kwargs.pop('''vision_config_dict''' , _A ) super().__init__(**_A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _UpperCamelCase = {} # This is the complete result when using `text_config_dict`. _UpperCamelCase = AltCLIPTextConfig(**_A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _UpperCamelCase = ( F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: _UpperCamelCase = ( F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ F"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(_A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: _UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. _UpperCamelCase = AltCLIPVisionConfig(**_A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _UpperCamelCase = { str(_A ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _UpperCamelCase = ( F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: _UpperCamelCase = ( F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """ F"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(_A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: _UpperCamelCase = {} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: _UpperCamelCase = {} logger.info('''`vision_config` is `None`. 
Initializing the `AltCLIPVisionConfig` with default values.''' ) _UpperCamelCase = AltCLIPTextConfig(**_A ) _UpperCamelCase = AltCLIPVisionConfig(**_A ) _UpperCamelCase = projection_dim _UpperCamelCase = logit_scale_init_value _UpperCamelCase = 1.0 @classmethod def UpperCamelCase_ ( cls : int , _A : AltCLIPTextConfig , _A : AltCLIPVisionConfig , **_A : List[str] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = copy.deepcopy(self.__dict__ ) _UpperCamelCase = self.text_config.to_dict() _UpperCamelCase = self.vision_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
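The composite config can be built from its two halves through the classmethod defined above (exposed as from_text_vision_configs in Transformers). A minimal sketch with default hyperparameters:

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig()
vision_config = AltCLIPVisionConfig()
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
assert config.projection_dim == 768  # the default from the signature above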
720
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__lowercase ): UpperCAmelCase = ["keras_nlp"] def __init__( self : Any , *_A : Dict , **_A : List[str] ): requires_backends(self , ['''keras_nlp'''] )
71
0
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = AudioLDMPipeline UpperCAmelCase = TEXT_TO_AUDIO_PARAMS UpperCAmelCase = TEXT_TO_AUDIO_BATCH_PARAMS UpperCAmelCase = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def UpperCamelCase_ ( self : Optional[Any] ): torch.manual_seed(0 ) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , ) _UpperCamelCase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCamelCase = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) _UpperCamelCase = ClapTextModelWithProjection(_A ) _UpperCamelCase = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) _UpperCamelCase = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , ) _UpperCamelCase = SpeechTaHifiGan(_A ) _UpperCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def UpperCamelCase_ ( self : Optional[int] , _A : int , _A : Optional[int]=0 ): if str(_A ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(_A ) else: _UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A ) _UpperCamelCase = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def UpperCamelCase_ ( self : Any ): _UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase 
= self.get_dummy_inputs(_A ) _UpperCamelCase = audioldm_pipe(**_A ) _UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(_A ) == 256 _UpperCamelCase = audio[:10] _UpperCamelCase = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 3 * [inputs['''prompt''']] # forward _UpperCamelCase = audioldm_pipe(**_A ) _UpperCamelCase = output.audios[0] _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 3 * [inputs.pop('''prompt''' )] _UpperCamelCase = audioldm_pipe.tokenizer( _A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , ) _UpperCamelCase = text_inputs['''input_ids'''].to(_A ) _UpperCamelCase = audioldm_pipe.text_encoder( _A , ) _UpperCamelCase = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _UpperCamelCase = F.normalize(_A , dim=-1 ) _UpperCamelCase = prompt_embeds # forward _UpperCamelCase = audioldm_pipe(**_A ) _UpperCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 3 * ['''this is a negative prompt'''] _UpperCamelCase = negative_prompt _UpperCamelCase = 3 * [inputs['''prompt''']] # forward _UpperCamelCase = audioldm_pipe(**_A ) _UpperCamelCase = output.audios[0] _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 3 * [inputs.pop('''prompt''' )] _UpperCamelCase = [] for p in [prompt, negative_prompt]: _UpperCamelCase = audioldm_pipe.tokenizer( _A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , ) _UpperCamelCase = text_inputs['''input_ids'''].to(_A ) _UpperCamelCase = audioldm_pipe.text_encoder( _A , ) _UpperCamelCase = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _UpperCamelCase = F.normalize(_A , dim=-1 ) embeds.append(_A ) _UpperCamelCase , _UpperCamelCase = embeds # forward _UpperCamelCase = audioldm_pipe(**_A ) _UpperCamelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def UpperCamelCase_ ( self : str ): _UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = PNDMScheduler(skip_prk_steps=_A ) _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = '''egg cracking''' _UpperCamelCase = audioldm_pipe(**_A , negative_prompt=_A ) _UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(_A ) == 256 _UpperCamelCase = audio[:10] _UpperCamelCase = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def 
UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = PNDMScheduler(skip_prk_steps=_A ) _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) _UpperCamelCase = audioldm_pipe(_A , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts _UpperCamelCase = 2 _UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt _UpperCamelCase = 2 _UpperCamelCase = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts _UpperCamelCase = 2 _UpperCamelCase = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **_A ) _UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(_A ) / vocoder_sampling_rate == 0.016 _UpperCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **_A ) _UpperCamelCase = output.audios[0] assert audio.ndim == 1 assert len(_A ) / vocoder_sampling_rate == 0.032 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = AudioLDMPipeline(**_A ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = ['''hey'''] _UpperCamelCase = audioldm_pipe(_A , num_inference_steps=1 ) _UpperCamelCase = output.audios.shape assert audio_shape == (1, 256) _UpperCamelCase = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _UpperCamelCase = SpeechTaHifiGan(_A ).to(_A ) _UpperCamelCase = audioldm_pipe(_A , num_inference_steps=1 ) _UpperCamelCase = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def UpperCamelCase_ ( self : Optional[int] ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A ) def UpperCamelCase_ ( self : str ): self._test_inference_batch_single_identical(test_mean_pixel_difference=_A ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : List[str] , _A : Union[str, Any] , _A : List[str]="cpu" , _A : Any=torch.floataa , _A : Dict=0 ): 
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A ) _UpperCamelCase = np.random.RandomState(_A ).standard_normal((1, 8, 128, 16) ) _UpperCamelCase = torch.from_numpy(_A ).to(device=_A , dtype=_A ) _UpperCamelCase = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs(_A ) _UpperCamelCase = 25 _UpperCamelCase = audioldm_pipe(**_A ).audios[0] assert audio.ndim == 1 assert len(_A ) == 8_1920 _UpperCamelCase = audio[7_7230:7_7240] _UpperCamelCase = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) _UpperCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) _UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _UpperCamelCase = audioldm_pipe.to(_A ) audioldm_pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs(_A ) _UpperCamelCase = audioldm_pipe(**_A ).audios[0] assert audio.ndim == 1 assert len(_A ) == 8_1920 _UpperCamelCase = audio[2_7780:2_7790] _UpperCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) _UpperCamelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
721
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
71
0
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
700
from sklearn.metrics import mean_squared_error import datasets _lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def UpperCamelCase_ ( self : Dict ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ): _UpperCamelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
71
0
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
701
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available, so the modeling objects can be exposed as well.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
from __future__ import annotations

import math


class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of the segment tree with the given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # array that stores pending lazy updates
        self.lazy = [0 for _ in range(0, 4 * size)]
        # flag marking nodes with a pending lazy update
        self.flag = [0 for _ in range(0, 4 * size)]

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            # push the pending update down to the children before recursing
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
71
0
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _snake_case ( __snake_case ): if isinstance(__snake_case , collections.abc.Iterable ): return x return (x, x) @require_tf class lowerCAmelCase_ : def UpperCamelCase_ ( self : List[str] , _A : Any , _A : Dict ): pass def UpperCamelCase_ ( self : List[Any] ): pass def UpperCamelCase_ ( self : Optional[Any] ): pass def UpperCamelCase_ ( self : int , _A : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : Optional[Any] , _A : str=None , **_A : Union[str, Any] ): _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) _UpperCamelCase = TFVisionTextDualEncoderModel(_A ) _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple=None , **_A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(_A , _A ) _UpperCamelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def UpperCamelCase_ ( self : List[Any] , _A : Any , _A : Tuple , _A : List[Any] , _A : Dict , _A : Optional[Any]=None , **_A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(_A , _A ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def UpperCamelCase_ ( self : int , _A : List[Any] , _A : List[Any] , _A : Tuple , _A : int , _A : str=None , **_A : Any ): _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(_A , _A ) _UpperCamelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) _UpperCamelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) 
_UpperCamelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) _UpperCamelCase = after_output[0].numpy() _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1e-5 ) def UpperCamelCase_ ( self : Dict , _A : int , _A : Optional[Any] , _A : Optional[Any] , _A : Any , _A : Dict=None , **_A : str ): _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(_A , _A ) _UpperCamelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) _UpperCamelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def UpperCamelCase_ ( self : Union[str, Any] , _A : np.ndarray , _A : np.ndarray , _A : float ): _UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , F"""Difference between torch and flax is {diff} (>= {tol}).""" ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.prepare_config_and_inputs() self.check_save_load(**_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @slow def UpperCamelCase_ ( self : str ): _UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs() _UpperCamelCase = model_a(**_A ) _UpperCamelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) _UpperCamelCase = TFVisionTextDualEncoderModel.from_pretrained(_A ) _UpperCamelCase = model_a(**_A ) _UpperCamelCase = after_outputs[0].numpy() _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1e-5 ) @require_tf class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): def UpperCamelCase_ ( self : str ): _UpperCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, 
'''attention_mask''': attention_mask} return model, inputs def UpperCamelCase_ ( self : Any , _A : Optional[int] , _A : Tuple ): _UpperCamelCase = TFViTModel(_A , name='''vision_model''' ) _UpperCamelCase = TFBertModel(_A , name='''text_model''' ) return vision_model, text_model def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = TFViTModelTester(self ) _UpperCamelCase = TFBertModelTester(self ) _UpperCamelCase = vit_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): def UpperCamelCase_ ( self : Union[str, Any] ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. _UpperCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def UpperCamelCase_ ( self : Tuple , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Tuple , _A : str=None , **_A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(_A , _A ) _UpperCamelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A ) _UpperCamelCase = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def UpperCamelCase_ ( self : str , _A : Optional[int] , _A : Tuple ): _UpperCamelCase = TFDeiTModel(_A , name='''vision_model''' ) _UpperCamelCase = TFRobertaModel(_A , name='''text_model''' ) return vision_model, text_model def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = TFDeiTModelTester(self ) _UpperCamelCase = TFRobertaModelTester(self ) 
_UpperCamelCase = vit_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): def UpperCamelCase_ ( self : str ): _UpperCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def UpperCamelCase_ ( self : List[Any] , _A : Any , _A : Tuple ): _UpperCamelCase = TFCLIPVisionModel(_A , name='''vision_model''' ) _UpperCamelCase = TFBertModel(_A , name='''text_model''' ) return vision_model, text_model def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFCLIPVisionModelTester(self ) _UpperCamelCase = TFBertModelTester(self ) _UpperCamelCase = clip_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=_A ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCamelCase = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' ) _UpperCamelCase = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _UpperCamelCase = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1e-3 ) )
703
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
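A minimal sketch of the lazy-loading idea behind _LazyModule, using only the standard library (this illustrates the mechanism, not the transformers implementation itself):

import importlib
import types


class LazyModule(types.ModuleType):
    # Defers submodule imports until one of their symbols is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value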
71
0
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either condition holds, the function is being asked to compute the
    # factorial of a negative number, which is not possible.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
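A quick sanity check of the function above against known binomial values:

# combinations(n, k) = n! / (k! * (n - k)!)
assert combinations(5, 2) == 10            # 120 / (2 * 6)
assert combinations(52, 5) == 2_598_960    # the classic five-card poker hand count
assert combinations(40, 4) == 91_390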
704
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
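In everyday use this reader/writer machinery is reached through the public datasets API rather than instantiated directly; a minimal round trip might look like this (file path is illustrative):

# Minimal JSON round trip via the public `datasets` API.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["hello", "world"]})
ds.to_json("example.jsonl")  # JSON Lines by default (orient="records", lines=True)

reloaded = load_dataset("json", data_files="example.jsonl", split="train")
assert reloaded[0]["text"] == "hello"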
71
0
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
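The class above backs the high-level text-generation pipeline; a typical invocation (model name illustrative) looks like:

# Typical use through the public pipeline API (model name is illustrative).
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of",
    max_new_tokens=20,      # forwarded to model.generate() via _sanitize_parameters
    return_full_text=True,  # ReturnType.FULL_TEXT: prompt + continuation
)
print(outputs[0]["generated_text"])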
71
0
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCAmelCase_ ( __lowercase ): def __init__( self : Tuple , _A : Union[str, "sqlalchemy.sql.Selectable"] , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[Features] = None , _A : str = None , _A : bool = False , **_A : Tuple , ): super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A ) _UpperCamelCase = Sql( cache_dir=_A , features=_A , sql=_A , con=_A , **_A , ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , ) # Build dataset for splits _UpperCamelCase = self.builder.as_dataset( split='''train''' , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Union[str, Any] , _A : Dataset , _A : str , _A : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = name _UpperCamelCase = con _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = to_sql_kwargs def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , _A ) _UpperCamelCase = self.to_sql_kwargs.pop('''con''' , _A ) _UpperCamelCase = self.to_sql_kwargs.pop('''index''' , _A ) _UpperCamelCase = self._write(index=_A , **self.to_sql_kwargs ) return written def UpperCamelCase_ ( self : int , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas() _UpperCamelCase = df.to_sql(self.name , self.con , index=_A , **_A ) return num_rows or len(_A ) def UpperCamelCase_ ( self : List[str] , _A : Any , **_A : Optional[Any] ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
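As with the JSON writer, this is normally driven through Dataset.from_sql and Dataset.to_sql; a sketch with an in-memory SQLite connection (table name is illustrative):

# Sketch: writing and reading a dataset through SQLite.
import sqlite3

from datasets import Dataset

con = sqlite3.connect(":memory:")
ds = Dataset.from_dict({"id": [1, 2], "text": ["hello", "world"]})
ds.to_sql("examples", con)  # the first batch writes the table, later batches append

reloaded = Dataset.from_sql("SELECT id, text FROM examples", con)
assert reloaded[1]["text"] == "world"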
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean per-token cross-entropy, so scaling by the label length
        # recovers the sequence log-likelihood used as the reference score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
71
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) ) _UpperCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _UpperCamelCase = {'''unk_token''': '''<unk>'''} _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : str , **_A : Union[str, Any] ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : Union[str, Any] , **_A : int ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : Optional[Any] , **_A : int ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) processor_slow.save_pretrained(self.tmpdirname ) _UpperCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) processor_fast.save_pretrained(self.tmpdirname ) _UpperCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _A ) self.assertIsInstance(processor_fast.tokenizer , _A ) self.assertEqual(processor_slow.image_processor.to_json_string() , 
image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _A ) self.assertIsInstance(processor_fast.image_processor , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = CLIPProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether tp freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
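Run as a script, the CLI arguments map one-to-one onto the dataclass fields above; an illustrative launch, given as comments since the script and data file names are assumptions not shown in this file:

# Illustrative command line; the script name and data layout are assumptions.
# python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./data \
#     --output_dir ./output \
#     --task translation \
#     --do_train --do_eval \
#     --predict_with_generate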
71
0
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the element, its parent pointer, and its rank.
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily add any edge
        # that connects two different components, tracked with the disjoint set.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
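A small worked example on a five-node graph, using the classes defined above (edge weights chosen so the MST is unambiguous):

# Worked example: Kruskal's MST on a small weighted graph.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(3, 4, 1)
g.add_edge(3, 5, 100)  # expensive edge, never chosen
g.add_edge(4, 5, 5)
g.add_edge(1, 5, 3)

mst = g.kruskal()
total_weight = sum(
    weight
    for start, neighbours in mst.connections.items()
    for end, weight in neighbours.items()
    if start < end  # each undirected edge is stored twice
)
assert total_weight == 1 + 1 + 2 + 3  # edges (1,2), (3,4), (2,3), (1,5)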
708
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    # Count, for each perimeter up to max_perimeter, how many right-angled
    # triangles with integer side lengths have exactly that perimeter.
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    # Project Euler 39: the perimeter <= n with the most solutions.
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
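A quick check of the functions above against the known values for this problem (Project Euler 39):

# p = 120 is the smallest perimeter with three solutions: (20, 48, 52), (24, 45, 51), (30, 40, 50).
assert pythagorean_triple(120)[120] == 3
assert solution(1000) == 840  # the perimeter <= 1000 admitting the most triangles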
71
0
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    # Convert between energy units by going through joules as the base unit.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
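Usage of the function above is a straight ratio of the two conversion factors; for example:

# 1 kWh is 3.6 MJ; a nutritional calorie (calorie_nutr) is 4186.8 J.
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0
assert energy_conversion("joule", "kilojoule", 5_000) == 5.0
assert energy_conversion("calorie_nutr", "joule", 1.0) == 4_186.8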
709
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DPMSolverSDEScheduler,) UpperCAmelCase = 10 def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ): _UpperCamelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_A ) return config def UpperCamelCase_ ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : List[Any] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCamelCase_ ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = 
self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
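Outside the test suite, the scheduler drops into the standard diffusers denoising loop. A minimal sketch (requires torchsde; the zero tensor below is a placeholder for a real UNet prediction, and the shapes are illustrative):

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample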
71
0
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
710
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
71
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self : Any , _A : List[Any] , _A : List[str]=3 , _A : Union[str, Any]=32 , _A : List[str]=3 , _A : Any=10 , _A : str=[10, 20, 30, 40] , _A : Any=[1, 1, 2, 1] , _A : Any=True , _A : str=True , _A : Dict="relu" , _A : Tuple=3 , _A : List[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = num_channels _UpperCamelCase = embeddings_size _UpperCamelCase = hidden_sizes _UpperCamelCase = depths _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_act _UpperCamelCase = num_labels _UpperCamelCase = scope _UpperCamelCase = len(_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = self.get_config() return config, pixel_values def UpperCamelCase_ ( self : Union[str, Any] ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : str ): _UpperCamelCase = FlaxRegNetModel(config=_A ) _UpperCamelCase = model(_A ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase_ ( self : Optional[int] , _A : Union[str, Any] , _A : str ): _UpperCamelCase = self.num_labels _UpperCamelCase = FlaxRegNetForImageClassification(config=_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = FlaxRegNetModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A ) def UpperCamelCase_ ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self : List[str] ): return def UpperCamelCase_ ( self : 
Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def UpperCamelCase_ ( self : Union[str, Any] ): pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) _UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def UpperCamelCase_ ( self : Optional[Any] ): def check_hidden_states_output(_A : Optional[int] , _A : Union[str, Any] , _A : Optional[int] ): _UpperCamelCase = model_class(_A ) _UpperCamelCase = model(**self._prepare_for_class(_A , _A ) ) _UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase = True check_hidden_states_output(_A , _A , _A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCamelCase = self._prepare_for_class(_A , _A ) _UpperCamelCase = model_class(_A ) @jax.jit def model_jitted(_A : Any , **_A : Dict ): return model(pixel_values=_A , **_A ) with self.subTest('''JIT Enabled''' ): _UpperCamelCase = model_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _UpperCamelCase = model_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( ): _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : str ): return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def UpperCamelCase_ ( self : str ): _UpperCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=_A , return_tensors='''np''' ) _UpperCamelCase = model(**_A ) # verify the logits _UpperCamelCase = (1, 1000) self.assertEqual(outputs.logits.shape , _A ) _UpperCamelCase = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
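# Sketch (not part of the test file above): the JIT-enabled vs JIT-disabled
# comparison pattern the last test uses, reduced to a self-contained example.
# ``model_fn`` is a stand-in pure function, not the RegNet module.
import jax
import jax.numpy as jnp


def model_fn(pixel_values):
    return jnp.tanh(pixel_values).sum(axis=(1, 2, 3))


jitted = jax.jit(model_fn)
x = jnp.ones((4, 3, 32, 32))
jit_out = jitted(x)
with jax.disable_jit():
    eager_out = jitted(x)  # same callable, executed eagerly
assert jit_out.shape == eager_out.shape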
711
def _snake_case(number):
    """Return the position of the highest set bit of a non-negative int.

    >>> _snake_case(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
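# Quick sanity check (added for illustration): the loop above reproduces the
# behaviour of the built-in ``int.bit_length()`` for non-negative integers.
for value in (0, 1, 2, 25, 2**20):
    assert _snake_case(value) == value.bit_length()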
71
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DEISMultistepScheduler,) UpperCAmelCase = (("num_inference_steps", 25),) def UpperCamelCase_ ( self : Tuple , **_A : Optional[int] ): _UpperCamelCase = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**_A ) return config def UpperCamelCase_ ( self : Optional[int] , _A : str=0 , **_A : Optional[Any] ): _UpperCamelCase = dict(self.forward_default_kwargs ) _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A ) _UpperCamelCase = self.dummy_sample _UpperCamelCase = 0.1 * sample _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCamelCase = self.get_scheduler_config(**_A ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(_A ) # copy over dummy past residuals _UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_A ) _UpperCamelCase = scheduler_class.from_pretrained(_A ) new_scheduler.set_timesteps(_A ) # copy over dummy past residuals _UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCamelCase , _UpperCamelCase = sample, sample for t in range(_A , time_step + scheduler.config.solver_order + 1 ): _UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample _UpperCamelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : int ): pass def UpperCamelCase_ ( self : Optional[Any] , _A : str=0 , **_A : List[str] ): _UpperCamelCase = dict(self.forward_default_kwargs ) _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A ) _UpperCamelCase = self.dummy_sample _UpperCamelCase = 0.1 * sample _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(_A ) # copy over dummy past residuals (must be after setting timesteps) _UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_A ) _UpperCamelCase = scheduler_class.from_pretrained(_A ) # copy over dummy past residuals new_scheduler.set_timesteps(_A ) # copy over dummy past residual (must be after setting timesteps) _UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample _UpperCamelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self : List[Any] , _A : Any=None , **_A : Optional[Any] ): if scheduler is None: _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(**_A ) _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(**_A ) _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = 10 _UpperCamelCase = self.dummy_model() _UpperCamelCase = 
self.dummy_sample_deter scheduler.set_timesteps(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ).prev_sample return sample def UpperCamelCase_ ( self : str ): _UpperCamelCase = dict(self.forward_default_kwargs ) _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A ) for scheduler_class in self.scheduler_classes: _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = self.dummy_sample _UpperCamelCase = 0.1 * sample if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ): scheduler.set_timesteps(_A ) elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ): _UpperCamelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCamelCase = scheduler.timesteps[5] _UpperCamelCase = scheduler.timesteps[6] _UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample _UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase_ ( self : List[Any] ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCamelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) _UpperCamelCase = self.full_loop(scheduler=_A ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 _UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCamelCase = self.full_loop(scheduler=_A ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 def UpperCamelCase_ ( self : Union[str, Any] ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : Optional[Any] ): self.check_over_configs(thresholding=_A ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , algorithm_type='''deis''' , solver_order=_A , solver_type=_A , ) def UpperCamelCase_ ( self : List[str] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : List[Any] ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , ) _UpperCamelCase = self.full_loop( solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , ) assert not torch.isnan(_A ).any(), "Samples have nan numbers" def UpperCamelCase_ ( self : int ): self.check_over_configs(lower_order_final=_A ) self.check_over_configs(lower_order_final=_A ) def UpperCamelCase_ ( self : int ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_A , time_step=0 ) def UpperCamelCase_ ( self : Dict ): 
_UpperCamelCase = self.full_loop() _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1e-3 def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) assert abs(result_mean.item() - 0.091 ) < 1e-3 def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 ) _UpperCamelCase = scheduler_class(**_A ) _UpperCamelCase = 10 _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ).prev_sample assert sample.dtype == torch.floataa
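# Sketch of the config round-trip the switch test above exercises (argument
# values taken from get_scheduler_config): configs are interchangeable across
# the multistep scheduler family via from_config.
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1000, beta_schedule="linear", solver_order=2)
unipc = UniPCMultistepScheduler.from_config(deis.config)
roundtrip = DEISMultistepScheduler.from_config(unipc.config)
assert roundtrip.config.solver_order == 2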
712
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
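# Sketch of loading the converted checkpoint afterwards. The dump path is
# hypothetical (it is whatever --pytorch_dump_folder_path was set to), and the
# class names follow the real transformers API, which the file above renders
# with digits replaced ("SpeechaTexta" for Speech2Text2).
from transformers import SpeechEncoderDecoderModel, Speech2Text2Tokenizer

model = SpeechEncoderDecoderModel.from_pretrained("path/to/pytorch_dump_folder")
tokenizer = Speech2Text2Tokenizer.from_pretrained("path/to/pytorch_dump_folder")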
71
0
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
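# Sketch of why the block tests above are reproducible: randn_tensor draws
# from the torch.Generator returned by torch.manual_seed, so reseeding with
# the same value replays the exact same dummy inputs.
import torch
from diffusers.utils import randn_tensor

first = randn_tensor((4, 32, 32, 32), generator=torch.manual_seed(0))
second = randn_tensor((4, 32, 32, 32), generator=torch.manual_seed(0))
assert torch.equal(first, second)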
713
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
71
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = ["pixel_values"] def __init__( self : int , _A : bool = True , _A : Dict[str, int] = None , _A : int = 0.9 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Any , ): super().__init__(**_A ) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 224} _UpperCamelCase = get_size_dict(_A , default_to_square=_A ) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = crop_pct _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[float] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): _UpperCamelCase = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: _UpperCamelCase = int(size['''shortest_edge'''] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: _UpperCamelCase = int(size['''height'''] / crop_pct ) else: _UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct )) else: raise ValueError('''Invalid size for resize: {}'''.format(_A ) ) _UpperCamelCase = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) else: if "shortest_edge" in size: _UpperCamelCase = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A ) elif "height" in size and "width" in size: _UpperCamelCase = (size['''height'''], size['''width''']) else: raise ValueError('''Invalid size for resize: {}'''.format(_A ) ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): _UpperCamelCase = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""size must contain 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def UpperCamelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Dict , ): _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(_A , default_to_square=_A ) _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' ) _UpperCamelCase = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_pct is None: raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _UpperCamelCase = [to_numpy_array(_A ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] _UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
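# Worked example of the crop_pct logic in ``resize`` above: the image is first
# resized to size / crop_pct, then center-cropped back down to ``size``.
size, crop_pct = {"shortest_edge": 224}, 0.9
resize_edge = int(size["shortest_edge"] / crop_pct)
assert resize_edge == 248  # 224 / 0.9 = 248.88..., truncated toward zero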
714
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t, filling parent[]
    # along the way (standard breadth-first search on the residual graph).
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # parent[] is filled by BFS and stores the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
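# The capacity matrix above is the classic textbook flow network; a fresh copy
# is used here because ford_fulkerson mutates the residual graph in place.
network = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(network, 0, 5) == 23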
71
0
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
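# Hypothetical concrete command showing how the ABC above is meant to be
# subclassed; the command name and behaviour are illustrative only.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--name", default="world")

    def run(self):
        print("hello")


HelloCommand().run()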
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
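# Miniature sketch of the _LazyModule idea (not the real implementation):
# attribute access resolves the owning submodule on first use, so importing
# the package stays cheap even when heavy backends like torch are installed.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)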
71
0
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowerCAmelCase_ ( __lowercase ): def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = SMALL_MODEL_IDENTIFIER _UpperCamelCase = '''pt''' _UpperCamelCase = '''tf''' def UpperCamelCase_ ( self : Any , _A : Tuple ): _UpperCamelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_A ) def UpperCamelCase_ ( self : Any , _A : Union[str, Any] ): _UpperCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_A ) model_tf.save_pretrained(_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = '''mock_framework''' # Framework provided - return whatever the user provides _UpperCamelCase = FeaturesManager.determine_framework(self.test_model , _A ) self.assertEqual(_A , _A ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_A ) _UpperCamelCase = FeaturesManager.determine_framework(_A , _A ) self.assertEqual(_A , _A ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_A ) _UpperCamelCase = FeaturesManager.determine_framework(_A , _A ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[int] ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_A ) _UpperCamelCase = FeaturesManager.determine_framework(_A ) self.assertEqual(_A , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_A ) _UpperCamelCase = FeaturesManager.determine_framework(_A ) self.assertEqual(_A , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_A ): _UpperCamelCase = FeaturesManager.determine_framework(_A ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = MagicMock(return_value=_A ) with patch('''transformers.onnx.features.is_tf_available''' , _A ): _UpperCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_A , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _UpperCamelCase = MagicMock(return_value=_A ) with patch('''transformers.onnx.features.is_torch_available''' , _A ): _UpperCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_A , self.framework_tf ) # Both in environment -> use PyTorch _UpperCamelCase = MagicMock(return_value=_A ) _UpperCamelCase = MagicMock(return_value=_A ) with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch( '''transformers.onnx.features.is_torch_available''' , _A ): _UpperCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_A , self.framework_pt ) # Both not in environment -> raise error _UpperCamelCase = MagicMock(return_value=_A ) _UpperCamelCase = MagicMock(return_value=_A ) with patch('''transformers.onnx.features.is_tf_available''' , _A ), patch( '''transformers.onnx.features.is_torch_available''' , _A ): with self.assertRaises(_A ): _UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
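# The resolution order these tests pin down, restated as a tiny hypothetical
# helper: an explicit framework always wins, then PyTorch, then TensorFlow,
# and an error is raised when neither backend is available.
def resolve_framework(requested, torch_available, tf_available):
    if requested is not None:
        return requested
    if torch_available:
        return "pt"
    if tf_available:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")


assert resolve_framework(None, True, True) == "pt"
assert resolve_framework("tf", True, True) == "tf"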
716
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
import argparse import os import re _lowerCAmelCase = "src/diffusers" # Pattern that looks at the indentation in a line. _lowerCAmelCase = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. _lowerCAmelCase = re.compile(r"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _lowerCAmelCase = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. _lowerCAmelCase = re.compile(r"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _lowerCAmelCase = re.compile(r"\[([^\]]+)\]") def _snake_case ( __snake_case ): _UpperCamelCase = _re_indent.search(__snake_case ) return "" if search is None else search.groups()[0] def _snake_case ( __snake_case , __snake_case="" , __snake_case=None , __snake_case=None ): _UpperCamelCase = 0 _UpperCamelCase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__snake_case ): index += 1 _UpperCamelCase = ['''\n'''.join(lines[:index] )] else: _UpperCamelCase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _UpperCamelCase = [lines[index]] index += 1 while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__snake_case ) ) if index < len(__snake_case ) - 1: _UpperCamelCase = [lines[index + 1]] index += 1 else: _UpperCamelCase = [] else: blocks.append('''\n'''.join(__snake_case ) ) _UpperCamelCase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__snake_case ) > 0: blocks.append('''\n'''.join(__snake_case ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__snake_case ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def _snake_case ( __snake_case ): def _inner(__snake_case ): return key(__snake_case ).lower().replace('''_''' , '''''' ) return _inner def _snake_case ( __snake_case , __snake_case=None ): # If no key is provided, we use a noop. def noop(__snake_case ): return x if key is None: _UpperCamelCase = noop # Constants are all uppercase, they go first. _UpperCamelCase = [obj for obj in objects if key(__snake_case ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _UpperCamelCase = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()] # Functions begin with a lowercase, they go last. _UpperCamelCase = [obj for obj in objects if not key(__snake_case )[0].isupper()] _UpperCamelCase = ignore_underscore(__snake_case ) return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) def _snake_case ( __snake_case ): # This inner function sort imports between [ ]. def _replace(__snake_case ): _UpperCamelCase = match.groups()[0] if "," not in imports: return f"""[{imports}]""" _UpperCamelCase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _UpperCamelCase = keys[:-1] return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(__snake_case )] ) + "]" _UpperCamelCase = import_statement.split('''\n''' ) if len(__snake_case ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _UpperCamelCase = 2 if lines[1].strip() == '''[''' else 1 _UpperCamelCase = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _UpperCamelCase = sort_objects(__snake_case , key=lambda __snake_case : x[1] ) _UpperCamelCase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__snake_case ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _UpperCamelCase = _re_bracket_content.sub(_replace , lines[1] ) else: _UpperCamelCase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _UpperCamelCase = keys[:-1] _UpperCamelCase = get_indent(lines[1] ) + ''', '''.join([f"""\"{k}\"""" for k in sort_objects(__snake_case )] ) return "\n".join(__snake_case ) else: # Finally we have to deal with imports fitting on one line _UpperCamelCase = _re_bracket_content.sub(_replace , __snake_case ) return import_statement def _snake_case ( __snake_case , __snake_case=True ): with open(__snake_case , '''r''' ) as f: _UpperCamelCase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _UpperCamelCase = split_code_in_indented_blocks( __snake_case , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__snake_case ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _UpperCamelCase = main_blocks[block_idx] _UpperCamelCase = block.split('''\n''' ) # Get to the start of the imports. _UpperCamelCase = 0 while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _UpperCamelCase = len(__snake_case ) else: line_idx += 1 if line_idx >= len(__snake_case ): continue # Ignore beginning and last line: they don't contain anything. _UpperCamelCase = '''\n'''.join(block_lines[line_idx:-1] ) _UpperCamelCase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _UpperCamelCase = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case ) # We have two categories of import key: list or _import_structure[key].append/extend _UpperCamelCase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _UpperCamelCase = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _UpperCamelCase = [(i, key) for i, key in enumerate(__snake_case ) if key is not None] _UpperCamelCase = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_UpperCamelCase = 0 _UpperCamelCase = [] for i in range(len(__snake_case ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _UpperCamelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(__snake_case ) count += 1 # And we put our main block back together with its first and last line. _UpperCamelCase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(__snake_case ): if check_only: return True else: print(f"""Overwriting {file}.""" ) with open(__snake_case , '''w''' ) as f: f.write('''\n'''.join(__snake_case ) ) def _snake_case ( __snake_case=True ): _UpperCamelCase = [] for root, _, files in os.walk(__snake_case ): if "__init__.py" in files: _UpperCamelCase = sort_imports(os.path.join(__snake_case , '''__init__.py''' ) , check_only=__snake_case ) if result: _UpperCamelCase = [os.path.join(__snake_case , '''__init__.py''' )] if len(__snake_case ) > 0: raise ValueError(f"""Would overwrite {len(__snake_case )} files, run `make style`.""" ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") _lowerCAmelCase = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
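# For reference, the ordering rule enforced above inside `_import_structure`
# blocks puts constants first, then classes, then functions, each group sorted
# case-insensitively with underscores ignored. A minimal standalone sketch of
# that rule with readable names (hypothetical; the script above implements the
# same logic with masked identifiers):
def _ignore_underscore_sketch(key):
    def inner(obj):
        return key(obj).lower().replace("_", "")

    return inner


def _sort_objects_sketch(objects, key=lambda x: x):
    constants = [o for o in objects if key(o).isupper()]
    classes = [o for o in objects if key(o)[0].isupper() and not key(o).isupper()]
    functions = [o for o in objects if not key(o)[0].isupper()]
    k = _ignore_underscore_sketch(key)
    return sorted(constants, key=k) + sorted(classes, key=k) + sorted(functions, key=k)


print(_sort_objects_sketch(["load_model", "MY_CONSTANT", "AutoModel", "_helper", "OTHER_CONST"]))
# -> ['MY_CONSTANT', 'OTHER_CONST', 'AutoModel', '_helper', 'load_model']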
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # integer division keeps n an int all the way down the recursion
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)); a is an exact multiple of b,
# so integer division keeps both sides of the comparison in integer arithmetic:
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a // b) % p == (a * b ** (p - 2)) % p)
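# Sanity check (assumes Python 3.8+ for pow with a negative exponent): by
# Fermat's little theorem, b**(p - 2) is the modular inverse of b modulo a
# prime p, which is what the division trick above relies on.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p) == pow(b, -1, p)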
def merge_sort(collection):
    # Despite the name, this is a min/max selection sort, not a true merge
    # sort: each pass extracts the minimum and maximum, so it runs in O(n^2).
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
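# Quick usage sketch. Note that the input list is consumed by the .remove()
# calls, so callers should pass a copy if they still need the original.
data = [5, 3, 1, 4]
print(merge_sort(data))  # [1, 3, 4, 5]
print(data)              # [] -- emptied as a side effect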
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
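# Dependency-free sanity check (hypothetical helper name; the functions above
# require IIRFilter from audio_filters.iir_filter): recompute the low-pass
# biquad coefficients from the same Audio EQ Cookbook formulas and confirm the
# DC gain H(z=1) = sum(b) / sum(a) is exactly 1, as expected for a low-pass.
from math import cos, sin, sqrt, tau


def _lowpass_coefficients_sketch(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    return [b0, b1, b0], [a0, a1, a2]


_bs, _as = _lowpass_coefficients_sketch(1_000, 48_000)
print(round(sum(_bs) / sum(_as), 6))  # 1.0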
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 _lowerCAmelCase = get_tests_dir("fixtures") class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Optional[Any] ): # A mock response for an HTTP head request to emulate server down _UpperCamelCase = mock.Mock() _UpperCamelCase = 500 _UpperCamelCase = {} _UpperCamelCase = HTTPError _UpperCamelCase = {} # Download this model to make sure it's in the cache. _UpperCamelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_A ) as mock_head: _UpperCamelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self : Tuple ): # This test is for deprecated behavior and can be removed in v5 _UpperCamelCase = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def UpperCamelCase_ ( self : str ): with self.assertRaises(_A ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_A ) @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): @classmethod def UpperCamelCase_ ( cls : Optional[int] ): _UpperCamelCase = TOKEN HfFolder.save_token(_A ) @classmethod def UpperCamelCase_ ( cls : Optional[int] ): try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def UpperCamelCase_ ( self : int ): _UpperCamelCase = ViTImageProcessor.from_pretrained(_A ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) _UpperCamelCase = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _A , repo_id='''test-image-processor''' , push_to_hub=_A , use_auth_token=self._token ) _UpperCamelCase = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ViTImageProcessor.from_pretrained(_A ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) _UpperCamelCase = 
ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _A , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_A , use_auth_token=self._token ) _UpperCamelCase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) def UpperCamelCase_ ( self : List[Any] ): CustomImageProcessor.register_for_auto_class() _UpperCamelCase = CustomImageProcessor.from_pretrained(_A ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) _UpperCamelCase = AutoImageProcessor.from_pretrained( F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=_A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
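# Standalone sketch of the rope_scaling contract validated above (hypothetical
# helper name; the real check lives on the config class):
def _validate_rope_scaling_sketch(rope_scaling):
    if rope_scaling is None:
        return  # rotary scaling disabled
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {factor}")


_validate_rope_scaling_sketch({"type": "linear", "factor": 2.0})  # accepted
_validate_rope_scaling_sketch(None)  # accepted: feature turned off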
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = "▁" _lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"} _lowerCAmelCase = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } _lowerCAmelCase = { "facebook/mbart-large-50-one-to-many-mmt": 1_024, } # fmt: off _lowerCAmelCase = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = VOCAB_FILES_NAMES UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase = ["input_ids", "attention_mask"] UpperCAmelCase = [] UpperCAmelCase = [] def __init__( self : Tuple , _A : str , _A : Any=None , _A : List[Any]=None , _A : List[str]="</s>" , _A : Optional[int]="</s>" , _A : List[Any]="<s>" , _A : int="<unk>" , _A : List[Any]="<pad>" , _A : List[str]="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs _UpperCamelCase = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_A , tgt_lang=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) _UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase = 1 _UpperCamelCase = len(self.sp_model ) _UpperCamelCase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A ) } _UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()} _UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _UpperCamelCase = src_lang if src_lang is not None else '''en_XX''' _UpperCamelCase = self.lang_code_to_id[self._src_lang] _UpperCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : List[str] ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase_ ( self : Optional[int] ): return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : Dict , _A : str ): _UpperCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[str] ): _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : List[Any] , _A : Dict ): _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self : str , _A : str ): return self.sp_model.encode(_A , out_type=_A ) def UpperCamelCase_ ( self : Dict , _A : str ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self : int , _A : int ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self : List[Any] , _A : str ): _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_A ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(_A ) _UpperCamelCase = False out_string += self.sp_model.decode(_A ) return out_string.strip() def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ): if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: _UpperCamelCase = 
self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) _UpperCamelCase = [1] * len(self.prefix_tokens ) _UpperCamelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_A )) + suffix_ones return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones def UpperCamelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : str , _A : Optional[int] , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _UpperCamelCase = src_lang _UpperCamelCase = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) _UpperCamelCase = self.convert_tokens_to_ids(_A ) _UpperCamelCase = tgt_lang_id return inputs def UpperCamelCase_ ( self : int , _A : List[str] , _A : str = "en_XX" , _A : Optional[List[str]] = None , _A : str = "ro_RO" , **_A : List[str] , ): _UpperCamelCase = src_lang _UpperCamelCase = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def UpperCamelCase_ ( self : str ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self : List[Any] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : Tuple , _A : str ): _UpperCamelCase = self.lang_code_to_id[src_lang] _UpperCamelCase = [self.cur_lang_code_id] _UpperCamelCase = [self.eos_token_id] def UpperCamelCase_ ( self : List[str] , _A : str ): _UpperCamelCase = self.lang_code_to_id[tgt_lang] _UpperCamelCase = [self.cur_lang_code_id] _UpperCamelCase = [self.eos_token_id]
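# Toy sketch of the fairseq <-> SentencePiece id alignment implemented above
# (hypothetical names; real ids come from the SentencePiece model). fairseq
# pins <s>/<pad>/</s>/<unk> at ids 0-3 and every remaining spm id is shifted
# by a fixed offset of 1:
_fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_fairseq_offset = 1


def _spm_to_fairseq_sketch(spm_id):
    # spm id 0 is '<unk>'; a 0 returned by PieceToId therefore maps to <unk>
    return spm_id + _fairseq_offset if spm_id else _fairseq_tokens_to_ids["<unk>"]


print(_spm_to_fairseq_sketch(0))  # 3 -> <unk>
print(_spm_to_fairseq_sketch(3))  # 4 -> the first "real" token ','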
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__lowercase ): UpperCAmelCase = ["keras_nlp"] def __init__( self : Any , *_A : Dict , **_A : List[str] ): requires_backends(self , ['''keras_nlp'''] )
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self : Optional[int] , _A : Dict , _A : Tuple=7 , _A : Tuple=3 , _A : int=18 , _A : List[str]=30 , _A : Optional[int]=400 , _A : str=True , _A : Dict=None , _A : List[str]=True , ): _UpperCamelCase = size if size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize def UpperCamelCase_ ( self : Union[str, Any] ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804], [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = ImageGPTImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = ImageGPTImageProcessingTester(self ) @property def UpperCamelCase_ ( self : Tuple ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''clusters''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) _UpperCamelCase = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , obj[key] ) ) else: self.assertEqual(obj[key] , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = os.path.join(_A , '''image_processor.json''' ) image_processor_first.to_json_file(_A ) _UpperCamelCase = self.image_processing_class.from_json_file(_A ).to_dict() _UpperCamelCase = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _A ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(_A ) _UpperCamelCase = self.image_processing_class.from_pretrained(_A ).to_dict() _UpperCamelCase = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _A ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def UpperCamelCase_ ( self : int ): pass def _snake_case ( ): _UpperCamelCase = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) _UpperCamelCase = Image.open(dataset[4]['''file'''] ) _UpperCamelCase = Image.open(dataset[5]['''file'''] ) _UpperCamelCase = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) _UpperCamelCase = prepare_images() # test non-batched _UpperCamelCase = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) _UpperCamelCase = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , _A ) # test batched _UpperCamelCase = image_processing(_A , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) _UpperCamelCase = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A )
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
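# The main layer above transposes inputs NCHW -> NHWC on entry (Keras Conv2D
# does not support channels-first on CPU) and transposes outputs back. The
# permutation semantics, sketched with NumPy so no TensorFlow is needed:
import numpy as np

x_nchw = np.zeros((2, 3, 224, 224))          # (batch, channels, height, width)
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # (batch, height, width, channels)
x_back = np.transpose(x_nhwc, (0, 3, 1, 2))  # back to channels-first
print(x_nhwc.shape, x_back.shape)            # (2, 224, 224, 3) (2, 3, 224, 224)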
'''simple docstring''' import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=1024 ): _UpperCamelCase , _UpperCamelCase = [], [] _UpperCamelCase = list(zip(__snake_case , __snake_case ) ) _UpperCamelCase , _UpperCamelCase = sorted_examples[0] def is_too_big(__snake_case ): return tok(__snake_case , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): _UpperCamelCase = new_src + ''' ''' + src _UpperCamelCase = new_tgt + ''' ''' + tgt if is_too_big(__snake_case ) or is_too_big(__snake_case ): # cant fit, finalize example finished_src.append(__snake_case ) finished_tgt.append(__snake_case ) _UpperCamelCase , _UpperCamelCase = src, tgt else: # can fit, keep adding _UpperCamelCase , _UpperCamelCase = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(__snake_case ) finished_tgt.append(__snake_case ) return finished_src, finished_tgt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = Path(__snake_case ) save_path.mkdir(exist_ok=__snake_case ) for split in ["train"]: _UpperCamelCase , _UpperCamelCase = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" _UpperCamelCase = [x.rstrip() for x in Path(__snake_case ).open().readlines()] _UpperCamelCase = [x.rstrip() for x in Path(__snake_case ).open().readlines()] _UpperCamelCase , _UpperCamelCase = pack_examples(__snake_case , __snake_case , __snake_case , __snake_case ) print(f"""packed {split} split from {len(__snake_case )} examples -> {len(__snake_case )}.""" ) Path(save_path / f"""{split}.source""" ).open('''w''' ).write('''\n'''.join(__snake_case ) ) Path(save_path / f"""{split}.target""" ).open('''w''' ).write('''\n'''.join(__snake_case ) ) for split in ["val", "test"]: _UpperCamelCase , _UpperCamelCase = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(__snake_case , save_path / f"""{split}.source""" ) shutil.copyfile(__snake_case , save_path / f"""{split}.target""" ) def _snake_case ( ): _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=__snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=__snake_case , default=128 ) parser.add_argument('''--data_dir''' , type=__snake_case ) parser.add_argument('''--save_path''' , type=__snake_case ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(__snake_case , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
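# Self-contained sketch of the greedy packing loop above: consecutive
# (source, target) pairs are concatenated until either side would exceed the
# token budget. A whitespace word count stands in for the tokenizer-based
# length check (hypothetical names; sorting of examples is omitted):
def _pack_pairs_sketch(sources, targets, max_tokens=8):
    def too_big(text):
        return len(text.split()) > max_tokens

    finished_src, finished_tgt = [], []
    new_src, new_tgt = sources[0], targets[0]
    for src, tgt in zip(sources[1:], targets[1:]):
        cand_src, cand_tgt = new_src + " " + src, new_tgt + " " + tgt
        if too_big(cand_src) or too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


print(_pack_pairs_sketch(["one two three", "four five", "six seven eight nine", "ten"], ["a b", "c d", "e f", "g"]))
# -> (['one two three four five', 'six seven eight nine ten'], ['a b c d', 'e f g'])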
from sklearn.metrics import mean_squared_error import datasets _lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def UpperCamelCase_ ( self : Dict ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ): _UpperCamelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models _UpperCamelCase = '''lm_head''' _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): _UpperCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=True ): if config_path is not None: _UpperCamelCase = UniSpeechConfig.from_pretrained(__snake_case ) else: _UpperCamelCase = UniSpeechConfig() if is_finetuned: if dict_path: _UpperCamelCase = Dictionary.load_from_json(__snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCamelCase = target_dict.pad_index _UpperCamelCase = target_dict.bos_index _UpperCamelCase = target_dict.eos_index _UpperCamelCase = len(target_dict.symbols ) _UpperCamelCase = os.path.join(__snake_case , '''vocab.json''' ) if not os.path.isdir(__snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) ) return os.makedirs(__snake_case , exist_ok=__snake_case ) _UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCamelCase = 42 _UpperCamelCase = 43 with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__snake_case , __snake_case ) _UpperCamelCase = WavaVecaPhonemeCTCTokenizer( __snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , ) _UpperCamelCase = True if config.feat_extract_norm == '''layer''' else False _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case ) processor.save_pretrained(__snake_case ) _UpperCamelCase = UniSpeechForCTC(__snake_case ) else: _UpperCamelCase = UniSpeechForPreTraining(__snake_case ) if is_finetuned: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _UpperCamelCase = model[0].eval() recursively_load_weights(__snake_case , __snake_case , __snake_case ) hf_unispeech.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or 
not" ) _lowerCAmelCase = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
701
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
import re


def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian phone number: optional +91/91/0 prefix followed by a
    10-digit mobile number starting with 7, 8 or 9."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
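# A few extra sanity checks (hypothetical sample numbers, not from the original
# file) make explicit what the pattern above accepts: a bare 10-digit mobile,
# an optional +91 prefix, and rejection of numbers that do not start with 7/8/9.
if __name__ == "__main__":
    assert indian_phone_validator("9876543210")         # bare 10-digit mobile
    assert indian_phone_validator("+918827897895")      # with +91 prefix
    assert not indian_phone_validator("+911234567890")  # mobiles start with 7/8/9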
702
from __future__ import annotations import math class lowerCAmelCase_ : def __init__( self : int , _A : int ): _UpperCamelCase = size # approximate the overall size of segment tree with given value _UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update _UpperCamelCase = [0 for i in range(0 , 4 * size )] _UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCamelCase_ ( self : str , _A : int ): return idx * 2 def UpperCamelCase_ ( self : Any , _A : int ): return idx * 2 + 1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ): if left_element == right_element: _UpperCamelCase = a[left_element - 1] else: _UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_A ) , _A , _A , _A ) self.build(self.right(_A ) , mid + 1 , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: _UpperCamelCase = val if left_element != right_element: _UpperCamelCase = val _UpperCamelCase = val _UpperCamelCase = True _UpperCamelCase = True return True _UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_A ) , _A , _A , _A , _A , _A ) self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) return True def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] _UpperCamelCase = (left_element + right_element) // 2 _UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A ) _UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A ) return max(_A , _A ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCAmelCase = 15 _lowerCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
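# Hypothetical cross-check (ours, not part of the original file): mirror the
# demo queries above on a plain Python list to confirm the values the lazy
# segment tree is expected to return for range-max with range-assignment updates.
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
print(max(A[3:6]))    # query(1, 1, size, 4, 6)   -> 7
print(max(A[6:11]))   # query(1, 1, size, 7, 11)  -> 14
print(max(A[6:12]))   # query(1, 1, size, 7, 12)  -> 15
A[0:3] = [111] * 3    # update(1, 1, size, 1, 3, 111)
print(max(A))         # query(1, 1, size, 1, 15)  -> 111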
71
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
703
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler 86: find the least cuboid size M such that the number of
    cuboids, up to M x M x M, whose shortest surface path has integer length
    first exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
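# Hypothetical cross-check (ours, not from the original file): brute-force the
# count of cuboids a <= b <= c <= m whose shortest surface path
# sqrt((a + b)**2 + c**2) is an integer. The problem statement gives 1975 such
# cuboids for m = 99, which this naive triple loop reproduces.
def brute_force_count(m: int) -> int:
    count = 0
    for c in range(1, m + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count


if __name__ == "__main__":
    print(brute_force_count(99))  # expected: 1975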
704
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
71
0
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
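# A slightly more defensive variant (our sketch; it assumes the page still
# renders exactly three "maincounter-number" divs, which is not guaranteed):
def covid_stats_safe(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    values = html.fromstring(response.content).xpath('//div[@class = "maincounter-number"]/span/text()')
    if len(values) != 3:
        raise ValueError(f"expected 3 counters, scraped {len(values)}")
    return covid_data(*(v.strip() for v in values))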
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
71
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # `loss` is the mean per-token cross-entropy, so multiplying by the
        # target length recovers the summed log-likelihood of the reference
        # continuation (the "mtf" score reported by the original TF code).
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # `loss` is the mean per-token cross-entropy, so multiplying by the
        # target length recovers the summed log-likelihood of the reference
        # continuation (the "mtf" score reported by the original TF code).
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
71
0
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "artists_file": "artists.json", "lyrics_file": "lyrics.json", "genres_file": "genres.json", } _lowerCAmelCase = { "artists_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json", }, "genres_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json", }, "lyrics_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json", }, } _lowerCAmelCase = { "jukebox": 512, } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = VOCAB_FILES_NAMES UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase = PRETRAINED_LYRIC_TOKENS_SIZES UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self : int , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : List[str]=["v3", "v2", "v2"] , _A : int=512 , _A : List[Any]=5 , _A : Tuple="<|endoftext|>" , **_A : Optional[int] , ): _UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token super().__init__( unk_token=_A , n_genres=_A , version=_A , max_n_lyric_tokens=_A , **_A , ) _UpperCamelCase = version _UpperCamelCase = max_n_lyric_tokens _UpperCamelCase = n_genres with open(_A , encoding='''utf-8''' ) as vocab_handle: _UpperCamelCase = json.load(_A ) with open(_A , encoding='''utf-8''' ) as vocab_handle: _UpperCamelCase = json.load(_A ) with open(_A , encoding='''utf-8''' ) as vocab_handle: _UpperCamelCase = json.load(_A ) _UpperCamelCase = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: _UpperCamelCase = oov.replace(R'''\-\'''' , R'''\-+\'''' ) _UpperCamelCase = regex.compile(_A ) _UpperCamelCase = {v: k for k, v in self.artists_encoder.items()} _UpperCamelCase = {v: k for k, v in self.genres_encoder.items()} _UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCamelCase_ ( self : List[str] ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCamelCase_ ( self : Tuple ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Dict , _A : List[Any] ): _UpperCamelCase = [self.artists_encoder.get(_A , 0 ) for artist in list_artists] for genres in range(len(_A ) ): _UpperCamelCase = [self.genres_encoder.get(_A , 0 ) for genre in list_genres[genres]] _UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _UpperCamelCase = [[self.lyrics_encoder.get(_A , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): return list(_A ) def UpperCamelCase_ ( self : Optional[int] , _A : Dict , _A : Optional[Any] , _A : List[str] , **_A : Dict ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_for_tokenization(_A , _A , _A ) _UpperCamelCase = self._tokenize(_A ) return artist, genre, lyrics def UpperCamelCase_ ( self : int , _A : str , _A : str , _A : str , _A : bool = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": _UpperCamelCase = artists[idx].lower() _UpperCamelCase = [genres[idx].lower()] else: _UpperCamelCase = self._normalize(artists[idx] ) + '''.v2''' _UpperCamelCase = [ self._normalize(_A ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _UpperCamelCase = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) _UpperCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' _UpperCamelCase = {vocab[index]: index + 1 for index in range(len(_A ) )} _UpperCamelCase = 0 _UpperCamelCase = len(_A ) + 1 _UpperCamelCase = self.vocab _UpperCamelCase = {v: k for k, v in self.vocab.items()} _UpperCamelCase = '''''' else: _UpperCamelCase = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) _UpperCamelCase = self._run_strip_accents(_A ) _UpperCamelCase = lyrics.replace('''\\''' , '''\n''' ) _UpperCamelCase = self.out_of_vocab.sub('''''' , _A ), [], [] return artists, genres, lyrics def UpperCamelCase_ ( self : Optional[int] , _A : Optional[Any] ): _UpperCamelCase = unicodedata.normalize('''NFD''' , _A ) _UpperCamelCase = [] for char in text: _UpperCamelCase = unicodedata.category(_A ) if cat == "Mn": continue output.append(_A ) return "".join(_A ) def UpperCamelCase_ ( self : Tuple , _A : str ): _UpperCamelCase = ( [chr(_A ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )] + [chr(_A ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )] + [chr(_A ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )] + ['''.'''] ) _UpperCamelCase = frozenset(_A ) _UpperCamelCase = re.compile(R'''_+''' ) _UpperCamelCase = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] ) _UpperCamelCase = pattern.sub('''_''' , _A ).strip('''_''' ) return text def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str] ): return " ".join(_A ) def UpperCamelCase_ ( self : List[str] , _A : 
List[str] , _A : Optional[Union[str, TensorType]] = None , _A : bool = False ): # Convert to TensorType if not isinstance(_A , _A ): _UpperCamelCase = TensorType(_A ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf _UpperCamelCase = tf.constant _UpperCamelCase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch _UpperCamelCase = torch.tensor _UpperCamelCase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 _UpperCamelCase = jnp.array _UpperCamelCase = _is_jax else: _UpperCamelCase = np.asarray _UpperCamelCase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _UpperCamelCase = [inputs] if not is_tensor(_A ): _UpperCamelCase = as_tensor(_A ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Optional[Any] , _A : Any , _A : int , _A : List[str]="" , _A : Tuple="pt" ): _UpperCamelCase = [0, 0, 0] _UpperCamelCase = [artist] * len(self.version ) _UpperCamelCase = [genres] * len(self.version ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.tokenize(_A , _A , _A ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._convert_token_to_id(_A , _A , _A ) _UpperCamelCase = [-INFINITY] * len(full_tokens[-1] ) _UpperCamelCase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_A ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def UpperCamelCase_ ( self : Optional[Any] , _A : str , _A : Optional[str] = None ): if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=_A ) ) _UpperCamelCase = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=_A ) ) _UpperCamelCase = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_A ) ) return (artists_file, genres_file, lyrics_file) def UpperCamelCase_ ( self : Dict , _A : Any , _A : int , _A : Any ): _UpperCamelCase = self.artists_decoder.get(_A ) _UpperCamelCase = [self.genres_decoder.get(_A ) for genre in genres_index] _UpperCamelCase = [self.lyrics_decoder.get(_A ) for character in lyric_index] return artist, genres, lyrics
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether tp freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
71
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] UpperCAmelCase = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] UpperCAmelCase = False @property def UpperCamelCase_ ( self : Dict ): return 32 @property def UpperCamelCase_ ( self : List[Any] ): return 32 @property def UpperCamelCase_ ( self : str ): return self.time_input_dim @property def UpperCamelCase_ ( self : List[Any] ): return self.time_input_dim * 4 @property def UpperCamelCase_ ( self : Any ): return 100 @property def UpperCamelCase_ ( self : int ): torch.manual_seed(0 ) _UpperCamelCase = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } _UpperCamelCase = UNetaDConditionModel(**_A ) return model @property def UpperCamelCase_ ( self : int ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCamelCase_ ( self : List[Any] ): torch.manual_seed(0 ) _UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.dummy_unet _UpperCamelCase = self.dummy_movq _UpperCamelCase = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } _UpperCamelCase = DDIMScheduler(**_A ) _UpperCamelCase = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Dict=0 ): _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) 
, rng=random.Random(_A ) ).to(_A ) _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) # create init_image _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCamelCase = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) ) # create hint _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(_A ) else: _UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A ) _UpperCamelCase = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '''cpu''' _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = self.pipeline_class(**_A ) _UpperCamelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) ) _UpperCamelCase = output.images _UpperCamelCase = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCamelCase = np.array( [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) _UpperCamelCase = init_image.resize((512, 512) ) _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) _UpperCamelCase = torch.from_numpy(np.array(_A ) ).float() / 255.0 _UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) _UpperCamelCase = '''A robot, 4k photo''' _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) _UpperCamelCase = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) _UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) _UpperCamelCase , _UpperCamelCase = pipe_prior( _A , image=_A , strength=0.85 , generator=_A , negative_prompt='''''' , ).to_tuple() _UpperCamelCase = pipeline( image=_A , 
image_embeds=_A , negative_image_embeds=_A , hint=_A , generator=_A , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) _UpperCamelCase = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_A , _A )
708
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to ``max_perimeter``, the number of
    integer-sided right triangles having that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Project Euler 39: the perimeter <= n with the most right-triangle
    solutions."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
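# Alternative counting via Euclid's formula (our own sketch, not part of the
# original file): every primitive triple (m**2 - k**2, 2*m*k, m**2 + k**2) with
# coprime m > k >= 1 of opposite parity has perimeter 2*m*(m + k); stepping
# through its multiples counts all triples far faster than the quadratic
# direct search above.
from math import gcd


def solution_euclid(n: int = 1000) -> int:
    counts: typing.Counter[int] = Counter()
    m = 2
    while 2 * m * (m + 1) <= n:  # smallest perimeter for this m uses k = 1
        for k in range(1, m):
            if (m - k) % 2 == 1 and gcd(m, k) == 1:
                primitive_perimeter = 2 * m * (m + k)
                for p in range(primitive_perimeter, n + 1, primitive_perimeter):
                    counts[p] += 1
        m += 1
    return counts.most_common(1)[0][0]


if __name__ == "__main__":
    assert solution_euclid() == solution() == 840  # both searches agree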
71
0
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # NOTE: originally `assertTrue(f.read(), expected)`, which only checked that the
                # file was non-empty; `assertEqual` performs the comparison that was intended.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
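# --- Editor's addition: a minimal usage sketch, not part of the original test file. ---
# It distills the denoising loop exercised by the tests above. DPMSolverSDEScheduler
# requires the `torchsde` package, and the "model" below is a hypothetical placeholder
# standing in for a real noise-prediction network.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample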
def bfs(graph, s, t, parent):
    # Return True if the sink `t` is reachable from the source `s`,
    # recording the BFS tree in `parent` along the way.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to recover the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
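# --- Editor's addition: a sketch of how a concrete test might wire the mixin above. ---
# The block class and block_type are real diffusers names, but the expected slice
# values are hypothetical placeholders, not recorded regression numbers.
import unittest

from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0] * 9  # placeholder values
        super().test_output(expected_slice)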
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97),
    computed with modular exponentiation so the full number is never built."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit (0 for zero).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
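# --- Editor's note: a quick cross-check of the function above. ---
# For non-negative integers the shift loop computes exactly Python's built-in
# ``int.bit_length()``; e.g. 25 == 0b11001 has its highest set bit at position 5:
assert get_highest_set_bit_position(25) == (25).bit_length() == 5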
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards-breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
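# --- Editor's addition: a minimal initialization sketch, not part of the original module. ---
# It assumes direct instantiation of the Flax module followed by `init_weights`, which
# builds the dummy sample/timestep/conditioning tensors defined above.
import jax

controlnet = FlaxControlNetModel(sample_size=32)
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))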
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
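# --- Editor's note on the _LazyModule indirection above. ---
# The submodule is only imported on first attribute access, so a bare
# `import transformers` stays cheap; a line like the one below is what
# finally triggers the real import of tokenization_byt5:
#
#     from transformers import ByT5Tokenizer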
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error
        # to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave
        # them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
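# --- Editor's addition: the `find_executable_batch_size` pattern above, distilled. ---
# The decorator retries the wrapped function with a smaller batch size whenever it
# raises an out-of-memory error; the body here is a stub for illustration only.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def demo_train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders with `batch_size` and run the training loop ...


demo_train()  # called with no arguments; the decorator supplies `batch_size`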
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: originally written as `if "fc2" and "experts" not in key:`, which always
        # evaluated the first operand as truthy; the membership test was intended.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
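As orientation for the assertions above, a minimal end-to-end sketch of the processor follows. The BERT checkpoint name is an assumption for illustration only; the tests themselves build the tokenizer and image processor from temp files.

import numpy as np
from PIL import Image
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed public checkpoint
image_processor = ViTImageProcessor()  # default config, as in the setUp above
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids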
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    # Re-prompt until the answer converts cleanly (or the default is taken).
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    # Strips the "<command> [<args>] " prefix from the usage line of subcommands.
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
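Each converter maps a menu index onto the corresponding enum; only the yes/no helper is string-keyed. A quick sanity sketch of the pure converters, which need no interactive menu:

assert _convert_yes_no_to_bool("YES") is True
assert _convert_yes_no_to_bool("no") is False
assert _convert_compute_environment(0) == ComputeEnvironment.LOCAL_MACHINE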
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer division: n must stay an int
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
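Both prints rely on Fermat's little theorem: for prime p and b not divisible by p, b**(p - 2) mod p is the modular inverse of b. A small hand-checkable sketch of that identity, with numbers chosen here purely for illustration:

p, a, b = 13, 20, 4
inv_b = binary_exponentiation(b, p - 2, p)  # 4**11 % 13 == 10, and (4 * 10) % 13 == 1
assert (a * inv_b) % p == (a // b) % p      # 20 / 4 == 5, and 5 % 13 == 5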
def valid_coloring(neighbours, colored_vertices, color):
    # A vertex may take `color` only if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
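A small usage sketch, using the adjacency-matrix input the helpers above assume (vertices i and j are adjacent when graph[i][j] == 1):

cycle4 = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
print(color(cycle4, 2))            # a 4-cycle is bipartite, so 2 colors suffice: [0, 1, 0, 1]
print(color([[0, 1], [1, 0]], 1))  # a single edge cannot be 1-colored: []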
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
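A short usage sketch for the biquad designs above. The function names are obfuscated in this dump, so the first constructor (a low-pass) is referred to here by the assumed name make_lowpass; the sketch further assumes the imported IIRFilter exposes a per-sample process() method.

samplerate = 48_000
lowpass = make_lowpass(1_000, samplerate)  # corner frequency near 1 kHz, Butterworth Q
impulse = [1.0] + [0.0] * 15
response = [lowpass.process(x) for x in impulse]  # first taps of the impulse response
print(response[:4])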
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
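The validator above enforces a two-field rope_scaling dict whose type is "linear" or "dynamic" and whose factor is a float greater than 1 (note that its error message says `name` while the lookup actually reads `type`). A minimal sketch, assuming the class is the library's GPTNeoXConfig:

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# Each of these would raise ValueError:
#   GPTNeoXConfig(rope_scaling={"type": "linear"})                 # missing "factor"
#   GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # unknown type
#   GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})  # factor must be > 1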
from __future__ import annotations

import math


def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n):
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # left truncations
        list_nums.append(int(str_num[:-i]))  # right truncations
    return list_nums


def validate(n):
    # Cheap pre-filter: for numbers with more than 3 digits, the leading and
    # trailing 3-digit chunks must themselves be prime.
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count=11):
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution():
    # `solution` is the conventional name for Project Euler files; the dump
    # obfuscated the original identifier.
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f'{sum(compute_truncated_primes(11)) = }')
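This is the Project Euler 37 task: find the eleven primes that remain prime under every left and right truncation. A worked check with the classic example 3797:

print(list_truncated_nums(3797))                            # [3797, 797, 379, 97, 37, 7, 3]
print(all(is_prime(i) for i in list_truncated_nums(3797)))  # True: 3797 is truncatable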
from ..utils import DummyObject, requires_backends


# Placeholder that raises a helpful ImportError when keras_nlp is missing.
# The dump obfuscates the class name; TFGPT2Tokenizer, the one keras_nlp-backed
# class in transformers, is the name assumed here.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "pix2struct_text_model" UpperCAmelCase = ["past_key_values"] UpperCAmelCase = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[Any] , _A : Union[str, Any]=5_0244 , _A : Dict=768 , _A : str=64 , _A : Optional[Any]=2048 , _A : Optional[Any]=12 , _A : str=12 , _A : List[str]=32 , _A : int=128 , _A : Tuple=0.1 , _A : Union[str, Any]=1e-6 , _A : str=1.0 , _A : Optional[int]="gelu_new" , _A : Tuple=0 , _A : List[str]=False , _A : Optional[Any]=0 , _A : Tuple=1 , _A : Tuple=False , _A : List[Any]=True , **_A : List[Any] , ): _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = d_kv _UpperCamelCase = d_ff _UpperCamelCase = num_layers _UpperCamelCase = num_heads _UpperCamelCase = relative_attention_num_buckets _UpperCamelCase = relative_attention_max_distance _UpperCamelCase = dropout_rate _UpperCamelCase = layer_norm_epsilon _UpperCamelCase = initializer_factor _UpperCamelCase = use_cache _UpperCamelCase = eos_token_id _UpperCamelCase = decoder_start_token_id # for backwards compatibility _UpperCamelCase = dense_act_fn super().__init__( pad_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , tie_word_embeddings=_A , is_decoder=_A , **_A , ) @classmethod def UpperCamelCase_ ( cls : Dict , _A : Union[str, os.PathLike] , **_A : List[Any] ): cls._set_token_in_kwargs(_A ) _UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": _UpperCamelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "pix2struct_vision_model" def __init__( self : Optional[int] , _A : str=768 , _A : str=768 , _A : Optional[Any]=2048 , _A : Optional[Any]=64 , _A : Any=12 , _A : Tuple=12 , _A : Dict="gelu_new" , _A : Any=1e-6 , _A : str=0.0 , _A : Any=0.0 , _A : Optional[Any]=1e-10 , _A : Any=1.0 , _A : int=4096 , _A : int=32 , _A : List[Any]=128 , **_A : Optional[int] , ): super().__init__(**_A ) _UpperCamelCase = hidden_size _UpperCamelCase = patch_embed_hidden_size _UpperCamelCase = d_ff _UpperCamelCase = dropout_rate _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = initializer_range _UpperCamelCase = initializer_factor _UpperCamelCase = attention_dropout _UpperCamelCase = layer_norm_eps _UpperCamelCase = dense_act_fn _UpperCamelCase = seq_len _UpperCamelCase = relative_attention_num_buckets _UpperCamelCase = relative_attention_max_distance _UpperCamelCase = d_kv @classmethod def UpperCamelCase_ ( cls : str , _A : Union[str, os.PathLike] , **_A : Any ): cls._set_token_in_kwargs(_A ) _UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": _UpperCamelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "pix2struct" UpperCAmelCase = True def __init__( self : Union[str, Any] , _A : Tuple=None , _A : Any=None , _A : Dict=1.0 , _A : Optional[int]=0.02 , _A : List[Any]=False , _A : List[Any]=False , _A : Any=True , **_A : str , ): super().__init__(tie_word_embeddings=_A , is_encoder_decoder=_A , **_A ) if text_config is None: _UpperCamelCase = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: _UpperCamelCase = {} logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' ) _UpperCamelCase = PixaStructTextConfig(**_A ) _UpperCamelCase = PixaStructVisionConfig(**_A ) _UpperCamelCase = self.text_config.decoder_start_token_id _UpperCamelCase = self.text_config.pad_token_id _UpperCamelCase = self.text_config.eos_token_id _UpperCamelCase = initializer_factor _UpperCamelCase = initializer_range _UpperCamelCase = self.initializer_range _UpperCamelCase = self.initializer_range _UpperCamelCase = is_vqa @classmethod def UpperCamelCase_ ( cls : Any , _A : PixaStructTextConfig , _A : PixaStructVisionConfig , **_A : Union[str, Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = copy.deepcopy(self.__dict__ ) _UpperCamelCase = self.text_config.to_dict() _UpperCamelCase = self.vision_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
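A minimal composition sketch for the three classes above, written with the real library names (the dump's Pixa* spellings are digit obfuscation); the classmethod shown last on the composite class is from_text_vision_configs in the actual source:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_ff=128, d_kv=32)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.to_dict()["text_config"]["num_layers"])  # 2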
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
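A minimal inference sketch for the classification head defined last above (TFRegNetForImageClassification in the actual library), assuming the public facebook/regnet-y-040 checkpoint already named in the file's docstrings:

import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in input
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000): ImageNet classes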
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["PLBartTokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", "PLBartForCausalLM", "PLBartForConditionalGeneration", "PLBartForSequenceClassification", "PLBartModel", "PLBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
from sklearn.metrics import mean_squared_error import datasets _lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def UpperCamelCase_ ( self : Dict ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ): _UpperCamelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2

    # this means n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
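A quick self-check of the test above. It is probabilistic, but 1000 rounds make a false positive vanishingly unlikely at this size:

assert is_prime_big(97) and is_prime_big(2) and not is_prime_big(1)
assert not is_prime_big(561)  # Carmichael number: fools plain Fermat tests, not Miller-Rabin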
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b], lazily."""
        if self.flag[idx] is True:
            # push the pending assignment down before touching this node
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Maximum over [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase_ : def __init__( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=30 , _A : Any=2 , _A : Optional[int]=3 , _A : Optional[Any]=True , _A : Dict=True , _A : Optional[Any]=32 , _A : Optional[Any]=5 , _A : List[str]=4 , _A : Tuple=37 , _A : Optional[int]="gelu" , _A : int=0.1 , _A : List[Any]=0.1 , _A : int=10 , _A : Any=0.02 , _A : int=3 , _A : Optional[Any]=None , _A : Optional[int]=2 , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = patch_size _UpperCamelCase = num_channels _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _UpperCamelCase = (image_size // patch_size) ** 2 _UpperCamelCase = num_patches + 2 def UpperCamelCase_ ( self : Any ): _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Tuple ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase_ ( self : str , _A : List[str] , _A : str , _A : Tuple ): _UpperCamelCase = DeiTModel(config=_A ) model.to(_A ) model.eval() _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Optional[int] , _A : Dict , _A : Optional[Any] , _A : Any ): _UpperCamelCase = 
DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() _UpperCamelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCamelCase = 1 _UpperCamelCase = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase_ ( self : int , _A : Dict , _A : Optional[Any] , _A : int ): _UpperCamelCase = self.type_sequence_label_size _UpperCamelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() _UpperCamelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase = 1 _UpperCamelCase = DeiTForImageClassification(_A ) model.to(_A ) model.eval() _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) UpperCAmelCase = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : int ): _UpperCamelCase = DeiTModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def UpperCamelCase_ ( self : List[Any] ): pass def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) _UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : int , _A : List[Any] , _A : Any=False ): _UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase_ ( self : str ): if not self.model_tester.is_training: return _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue _UpperCamelCase = model_class(_A ) model.to(_A ) model.train() _UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A ) _UpperCamelCase = model(**_A ).loss loss.backward() def UpperCamelCase_ ( self : str ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _UpperCamelCase = False _UpperCamelCase = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _UpperCamelCase = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() _UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A ) _UpperCamelCase = model(**_A ).loss loss.backward() def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): _UpperCamelCase = problem_type['''title'''] _UpperCamelCase = problem_type['''num_labels'''] _UpperCamelCase = model_class(_A ) model.to(_A ) model.train() _UpperCamelCase = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: _UpperCamelCase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) _UpperCamelCase = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: _UpperCamelCase = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def UpperCamelCase_ ( self : List[Any] ): for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _snake_case ( ): _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : int ): return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( _A ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): _UpperCamelCase = model(**_A ) # verify the logits _UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) _UpperCamelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCamelCase_ ( self : int ): _UpperCamelCase = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=_A , return_tensors='''pt''' ) _UpperCamelCase = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCamelCase = model(_A )
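# Outside the test harness, the integration check above boils down to a few
# lines of user code (checkpoint and fixture paths taken from the test itself):
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(logits.argmax(-1))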
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
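# The module above defers its heavy imports through transformers' `_LazyModule`:
# nothing under `modeling_jukebox` is imported until one of the exported names is
# first accessed. A minimal, self-contained sketch of that pattern (illustrative
# only, not the actual `_LazyModule` implementation):
import importlib
import sys
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
        self.__all__ = list(self._attr_to_submodule)

    def __getattr__(self, name):
        if name not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups bypass __getattr__
        return value


# a package __init__.py would then end with something like:
# sys.modules[__name__] = LazyModule(__name__, {"tokenization_jukebox": ["JukeboxTokenizer"]})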
from __future__ import annotations class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : str , _A : str ): _UpperCamelCase , _UpperCamelCase = text, pattern _UpperCamelCase , _UpperCamelCase = len(_A ), len(_A ) def UpperCamelCase_ ( self : List[Any] , _A : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def UpperCamelCase_ ( self : List[str] ): # searches pattern in text and returns index positions _UpperCamelCase = [] for i in range(self.textLen - self.patLen + 1 ): _UpperCamelCase = self.mismatch_in_text(_A ) if mismatch_index == -1: positions.append(_A ) else: _UpperCamelCase = self.match_in_pattern(self.text[mismatch_index] ) _UpperCamelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowerCAmelCase = "ABAABA" _lowerCAmelCase = "AB" _lowerCAmelCase = BoyerMooreSearch(text, pattern) _lowerCAmelCase = bms.bad_character_heuristic() if len(positions) == 0: print("No match found") else: print("Pattern found in following positions: ") print(positions)
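# The class above is a Boyer-Moore search that uses only the bad-character
# heuristic (under the corpus's obfuscated names). A readable sketch of the
# same idea, with the computed shift actually applied (names are our own):
def boyer_moore_search(text: str, pattern: str) -> list[int]:
    positions = []
    n, m = len(text), len(pattern)
    if m == 0 or n < m:
        return positions
    i = 0
    while i <= n - m:
        # scan the current window right-to-left for the first mismatch
        j = m - 1
        while j >= 0 and pattern[j] == text[i + j]:
            j -= 1
        if j < 0:
            positions.append(i)
            i += 1
        else:
            # align the mismatching text character with its last occurrence
            # in the pattern, or move past the window if it never occurs
            i += max(1, j - pattern.rfind(text[i + j], 0, j))
    return positions


assert boyer_moore_search("ABAABA", "AB") == [0, 3]  # matches the demo above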
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
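# The writer above serializes an Arrow table to JSON Lines in fixed-size
# batches, optionally fanning the batches out to a multiprocessing pool. The
# core batching loop, sketched with plain pandas (hypothetical helper, not
# part of `datasets`):
import pandas as pd


def write_json_lines(df: pd.DataFrame, path: str, batch_size: int = 10_000) -> int:
    written = 0
    with open(path, "wb") as f:
        for offset in range(0, len(df), batch_size):
            chunk = df.iloc[offset : offset + batch_size]
            payload = chunk.to_json(orient="records", lines=True)
            if not payload.endswith("\n"):  # same trailing-newline guard as above
                payload += "\n"
            written += f.write(payload.encode("utf-8"))
    return written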
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
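# The class above backs the "text-generation" task; in practice it is reached
# through the high-level factory. A short usage example ("gpt2" is just an
# illustrative checkpoint, any causal LM works):
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of",
    max_new_tokens=20,
    return_full_text=False,  # maps to ReturnType.NEW_TEXT in the code above
)
print(outputs[0]["generated_text"])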
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {"vocab_file": "spiece.model"} _lowerCAmelCase = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } _lowerCAmelCase = {"bert_for_seq_generation": 512} class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = VOCAB_FILES_NAMES UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase = [] UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self : Any , _A : List[str] , _A : List[Any]="<s>" , _A : List[str]="</s>" , _A : List[str]="<unk>" , _A : Any="<pad>" , _A : int="<::::>" , _A : Optional[Dict[str, Any]] = None , **_A : List[Any] , ): _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sep_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) @property def UpperCamelCase_ ( self : Optional[Any] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : List[Any] , _A : List[Any] ): _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Optional[Any] , _A : str ): return self.sp_model.encode(_A , out_type=_A ) def UpperCamelCase_ ( self : Dict , _A : List[str] ): return self.sp_model.piece_to_id(_A ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Any ): _UpperCamelCase = self.sp_model.IdToPiece(_A ) return token def UpperCamelCase_ ( self : str , _A : Dict ): _UpperCamelCase = [] _UpperCamelCase = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A ) + token _UpperCamelCase = [] else: current_sub_tokens.append(_A ) out_string += self.sp_model.decode(_A ) return out_string.strip() def UpperCamelCase_ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ): if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
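# The tokenizer above is a thin wrapper around a SentencePiece model; its
# tokenize/piece-to-id methods delegate straight to `sentencepiece`. The
# underlying round trip, using that library directly (assumes a local
# `spiece.model` file):
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spiece.model")
pieces = sp.encode("Hello world", out_type=str)  # subword strings
ids = sp.encode("Hello world", out_type=int)     # vocabulary ids
text = sp.decode(ids)                            # round-trips for typical models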
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss _UpperCamelCase = -(labels.shape[-1] * loss.item()) _UpperCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
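# The assertion above rescales the mean token cross-entropy returned by the
# model into a summed log-likelihood: score = -(target_length * mean_loss).
# With hypothetical numbers on the same scale as EXPECTED_SCORE:
target_length, mean_loss = 7, 12.13
log_likelihood = -(target_length * mean_loss)  # about -84.91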
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = CanineTokenizer UpperCAmelCase = False def UpperCamelCase_ ( self : List[str] ): super().setUp() _UpperCamelCase = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase_ ( self : int ): return CanineTokenizer.from_pretrained('''google/canine-s''' ) def UpperCamelCase_ ( self : int , **_A : Dict ): _UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) _UpperCamelCase = 1024 return tokenizer @require_torch def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.canine_tokenizer _UpperCamelCase = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.'''] # fmt: off _UpperCamelCase = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on _UpperCamelCase = tokenizer(_A , padding=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) _UpperCamelCase = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.canine_tokenizer _UpperCamelCase = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.'''] _UpperCamelCase = tokenizer(_A , padding=_A , return_tensors='''pt''' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertIn('''token_type_ids''' , _A ) @require_torch def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.canine_tokenizer _UpperCamelCase = [ '''What\'s the weater?''', '''It\'s about 25 degrees.''', ] _UpperCamelCase = tokenizer( text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def UpperCamelCase_ ( self : Any ): # safety check on max_len default value so we are sure the test works _UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running''' _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) _UpperCamelCase = tokenizer.__class__.from_pretrained(_A ) _UpperCamelCase = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) _UpperCamelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc 
_UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = ''' He is very happy, UNwant\u00E9d,running''' _UpperCamelCase = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _UpperCamelCase = chr(0xe_007 ) additional_special_tokens.append(_A ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) _UpperCamelCase = tokenizer.__class__.from_pretrained(_A ) _UpperCamelCase = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn(_A , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _UpperCamelCase = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase , _UpperCamelCase = self.get_clean_sequence(_A ) # a special token for Canine can be defined as follows: _UpperCamelCase = 0xe_005 _UpperCamelCase = chr(_A ) tokenizer.add_special_tokens({'''cls_token''': special_token} ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertEqual(len(_A ) , 1 ) _UpperCamelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_A ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) self.assertEqual(_A , input_encoded + special_token_id ) _UpperCamelCase = tokenizer.decode(_A , skip_special_tokens=_A ) self.assertTrue(special_token not in decoded ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase = chr(0xe_005 ) _UpperCamelCase = chr(0xe_006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_A ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} ) _UpperCamelCase = tokenizer.tokenize(_A ) _UpperCamelCase = tokenizer.tokenize(_A ) self.assertEqual(len(_A ) , 1 ) self.assertEqual(len(_A ) , 1 ) self.assertEqual(token_a[0] , _A ) self.assertEqual(token_a[0] , _A ) @require_tokenizers def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: _UpperCamelCase = 0xe_006 _UpperCamelCase = chr(_A ) _UpperCamelCase = AddedToken(_A , lstrip=_A ) tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(_A ) tokenizer.from_pretrained(_A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: _UpperCamelCase = json.load(_A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: _UpperCamelCase = json.load(_A ) # a special token for Canine can be defined as follows: _UpperCamelCase = 0xe_006 _UpperCamelCase = chr(_A ) _UpperCamelCase = [new_token_a] _UpperCamelCase = [new_token_a] with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _UpperCamelCase = tokenizer_class.from_pretrained(_A , extra_ids=0 ) self.assertIn(_A , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _UpperCamelCase = 0xe_007 _UpperCamelCase = chr(_A ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _UpperCamelCase = [AddedToken(_A , lstrip=_A )] _UpperCamelCase = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , extra_ids=0 ) self.assertIn(_A , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase = '''hello world''' if self.space_between_special_tokens: _UpperCamelCase = '''[CLS] hello world [SEP]''' else: _UpperCamelCase = input _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) _UpperCamelCase = tokenizer.decode(_A , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(_A , [output, output.lower()] ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase = [ '''bos_token''', '''eos_token''', '''unk_token''', '''sep_token''', '''pad_token''', '''cls_token''', '''mask_token''', ] _UpperCamelCase = '''a''' _UpperCamelCase = ord(_A ) for attr in attributes_list: setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , attr + '''_id''' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '''_id''' ) , _A ) setattr(_A , '''additional_special_tokens_ids''' , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] ) _UpperCamelCase = 0xe_006 _UpperCamelCase = chr(_A ) setattr(_A , '''additional_special_tokens_ids''' , [additional_special_token_id] ) self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [additional_special_token] ) self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [additional_special_token_id] ) def UpperCamelCase_ ( self : str ): pass def UpperCamelCase_ ( self : str ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Optional[int] ): pass def UpperCamelCase_ ( self : Union[str, Any] ): pass def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : str ): pass def UpperCamelCase_ ( self : Optional[Any] ): pass
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples.
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
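# Besides CLI flags, the script accepts a single JSON file (the `sys.argv`
# branch at the top of main()). A hypothetical minimal configuration, using
# only fields defined above plus standard Seq2SeqTrainingArguments:
import json

config = {
    "model_name_or_path": "t5-small",  # assumption: any seq2seq checkpoint
    "data_dir": "path/to/data",
    "task": "summarization",
    "output_dir": "output",
    "do_train": True,
    "do_eval": True,
    "predict_with_generate": True,
}
with open("args.json", "w") as f:
    json.dump(config, f, indent=2)
# then: python finetune_trainer.py args.json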
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _lowerCAmelCase = "python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py') def _snake_case ( __snake_case , __snake_case=None ): require_version(deps[pkg] , __snake_case )
from __future__ import annotations import typing from collections import Counter def _snake_case ( __snake_case ): _UpperCamelCase = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(__snake_case , max_perimeter + 1 ): _UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(__snake_case ): _UpperCamelCase = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def _snake_case ( __snake_case = 1000 ): _UpperCamelCase = pythagorean_triple(__snake_case ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(f'Perimeter {solution()} has maximum solutions')
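# A readable restatement of the Project-Euler-style search above (same
# counting scheme: the perpendicular ranges from the base upward so each
# triangle is counted once; an exact integer check replaces the float sqrt):
import math
from collections import Counter


def most_common_perimeter(max_perimeter: int = 1000) -> int:
    counts = Counter()
    for base in range(1, max_perimeter + 1):
        for perp in range(base, max_perimeter + 1):
            hyp_sq = base * base + perp * perp
            hyp = math.isqrt(hyp_sq)
            if hyp * hyp == hyp_sq and base + perp + hyp <= max_perimeter:
                counts[base + perp + hyp] += 1
    return counts.most_common(1)[0][0]


assert most_common_perimeter(120) == 120  # 120 = 30+40+50 = 24+45+51 = 20+48+52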
import re def _snake_case ( __snake_case ): if len(re.findall('''[ATCG]''' , __snake_case ) ) != len(__snake_case ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
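# The same helper with descriptive names (illustrative restatement):
import re


def dna_complement(strand: str) -> str:
    if len(re.findall("[ATCG]", strand)) != len(strand):
        raise ValueError("Invalid Strand")
    return strand.translate(str.maketrans("ATCG", "TAGC"))


assert dna_complement("GTAC") == "CATG"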
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DPMSolverSDEScheduler,) UpperCAmelCase = 10 def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ): _UpperCamelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_A ) return config def UpperCamelCase_ ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : List[Any] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCamelCase_ ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = 
self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
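# Hedged sketch of the denoising loop the scheduler tests above exercise;
# requires torchsde, and the zero "prediction" merely stands in for a UNet.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02,
    beta_schedule="linear", noise_sampler_seed=0,
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample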
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
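# The block-test harness above pins randomness with explicit generators; a
# minimal sketch of that reproducibility pattern in plain torch.
import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
x = torch.randn((4, 32, 32, 32), generator=gen_a)
y = torch.randn((4, 32, 32, 32), generator=gen_b)
assert torch.equal(x, y)  # same seed -> identical "random" inputs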
def _snake_case ( __snake_case , __snake_case = False ): if not isinstance(__snake_case , __snake_case ): _UpperCamelCase = f"""Expected string as input, found {type(__snake_case )}""" raise ValueError(__snake_case ) if not isinstance(__snake_case , __snake_case ): _UpperCamelCase = f"""Expected boolean as use_pascal parameter, found {type(__snake_case )}""" raise ValueError(__snake_case ) _UpperCamelCase = input_str.split('''_''' ) _UpperCamelCase = 0 if use_pascal else 1 _UpperCamelCase = words[start_index:] _UpperCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] _UpperCamelCase = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
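# Expected behaviour of the snake_case converter above, as a hedged sketch
# with readable names (`snake_to_camel_case` mirrors the obfuscated function).
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"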
def _snake_case ( __snake_case ): if not isinstance(__snake_case , __snake_case ): raise TypeError('''Input value must be an \'int\' type''' ) _UpperCamelCase = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
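# The loop above counts right-shifts until the value reaches zero, which is
# exactly int.bit_length() for non-negative ints; a quick hedged check.
def highest_bit_position(number: int) -> int:
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

for n in (0, 1, 2, 17, 1024):
    assert highest_bit_position(n) == n.bit_length()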
def _snake_case ( __snake_case ): def merge(__snake_case , __snake_case ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(__snake_case ) <= 1: return collection _UpperCamelCase = len(__snake_case ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip() _lowerCAmelCase = [int(item) for item in user_input.split(",")] print(*merge_sort(unsorted), sep=",")
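# Readable sketch of the merge step the sort above performs; ties take from
# the left run, so the sort is stable. Helper names are illustrative.
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        result = []
        while left and right:
            result.append((left if left[0] <= right[0] else right).pop(0))
        return result + left + right

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))

assert merge_sort([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]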
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
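# Hedged sketch of the wildcard renaming the conversion script above relies
# on: the `*` in a mapped key is replaced by the layer index recovered from
# the fairseq parameter name. `rename` is an illustrative helper.
MAPPING_EXAMPLE = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename(fairseq_name):
    for key, mapped_key in MAPPING_EXAMPLE.items():
        if key in fairseq_name:
            if "*" in mapped_key:
                layer_index = fairseq_name.split(key)[0].split(".")[-2]
                return mapped_key.replace("*", layer_index)
            return mapped_key
    return None  # caller collects these as unused weights

assert rename("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"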
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class lowerCAmelCase_ ( unittest.TestCase, __lowercase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = load_tool('''text-question-answering''' ) self.tool.setup() _UpperCamelCase = load_tool('''text-question-answering''' , remote=_A ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.tool(_A , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.remote_tool(_A , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.tool(text=_A , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.remote_tool(text=_A , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(_A , '''launched the BigScience Research Workshop''' )
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
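# Hedged usage sketch of the rope_scaling validation above; assumes a
# transformers version whose GPTNeoXConfig accepts `rope_scaling`.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    GPTNeoXConfig(rope_scaling={"type": "unknown", "factor": 2.0})
except ValueError as err:
    print(err)  # only 'linear' and 'dynamic' types are accepted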
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): # Return True if the sink can still be reached from the source. _UpperCamelCase = [False] * len(__snake_case ) _UpperCamelCase = [] queue.append(__snake_case ) _UpperCamelCase = True while queue: _UpperCamelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__snake_case ) _UpperCamelCase = True _UpperCamelCase = u return visited[t] def _snake_case ( __snake_case , __snake_case , __snake_case ): # This array is filled by BFS to store the augmenting path _UpperCamelCase = [-1] * (len(__snake_case )) _UpperCamelCase = 0 while bfs(__snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = float('''Inf''' ) _UpperCamelCase = sink while s != source: # Find the minimum capacity along the selected path _UpperCamelCase = min(__snake_case , graph[parent[s]][s] ) _UpperCamelCase = parent[s] max_flow += path_flow _UpperCamelCase = sink while v != source: _UpperCamelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _UpperCamelCase = parent[v] return max_flow _lowerCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowerCAmelCase, _lowerCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
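# Hedged, readable sketch of the Edmonds-Karp loop above (BFS finds a shortest
# augmenting path, then the bottleneck capacity is pushed along it); helper
# names are illustrative, and the capacity matrix is mutated in place.
from collections import deque

def max_flow(capacity, source, sink):
    n, flow = len(capacity), 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return flow
        bottleneck, v = float("inf"), sink
        while v != source:  # walk back to find the bottleneck capacity
            bottleneck = min(bottleneck, capacity[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # push flow and add residual capacity
            capacity[parent[v]][v] -= bottleneck
            capacity[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

demo = [[0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0]]
assert max_flow(demo, 0, 5) == 23  # classic CLRS example graph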
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
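# Both import files above defer heavy imports through _LazyModule; a hedged
# sketch of the same idea using module-level __getattr__ (PEP 562) in a
# hypothetical package __init__.py. Names here are illustrative.
import importlib

_LAZY_ATTRS = {"UniSpeechModel": ".modeling_unispeech"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")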
from collections.abc import Callable class lowerCAmelCase_ : def __init__( self : Optional[int] , _A : Callable | None = None ): # Stores actual heap items. _UpperCamelCase = [] # Stores indexes of each item for supporting updates and deletion. _UpperCamelCase = {} # Stores current size of heap. _UpperCamelCase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. _UpperCamelCase = key or (lambda _A : x) def UpperCamelCase_ ( self : Union[str, Any] , _A : int ): return int((i - 1) / 2 ) if i > 0 else None def UpperCamelCase_ ( self : List[str] , _A : int ): _UpperCamelCase = int(2 * i + 1 ) return left if 0 < left < self.size else None def UpperCamelCase_ ( self : Dict , _A : int ): _UpperCamelCase = int(2 * i + 2 ) return right if 0 < right < self.size else None def UpperCamelCase_ ( self : Tuple , _A : int , _A : int ): _UpperCamelCase , _UpperCamelCase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. _UpperCamelCase , _UpperCamelCase = self.arr[j], self.arr[i] def UpperCamelCase_ ( self : str , _A : int , _A : int ): return self.arr[i][1] < self.arr[j][1] def UpperCamelCase_ ( self : List[Any] , _A : int ): _UpperCamelCase = self._left(_A ) _UpperCamelCase = self._right(_A ) _UpperCamelCase = i if left is not None and not self._cmp(_A , _A ): _UpperCamelCase = left if right is not None and not self._cmp(_A , _A ): _UpperCamelCase = right return valid_parent def UpperCamelCase_ ( self : Optional[int] , _A : int ): _UpperCamelCase = self._parent(_A ) while parent is not None and not self._cmp(_A , _A ): self._swap(_A , _A ) _UpperCamelCase , _UpperCamelCase = parent, self._parent(_A ) def UpperCamelCase_ ( self : List[Any] , _A : int ): _UpperCamelCase = self._get_valid_parent(_A ) while valid_parent != index: self._swap(_A , _A ) _UpperCamelCase , _UpperCamelCase = valid_parent, self._get_valid_parent(_A ) def UpperCamelCase_ ( self : str , _A : int , _A : int ): if item not in self.pos_map: return _UpperCamelCase = self.pos_map[item] _UpperCamelCase = [item, self.key(_A )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(_A ) self._heapify_down(_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : int ): if item not in self.pos_map: return _UpperCamelCase = self.pos_map[item] del self.pos_map[item] _UpperCamelCase = self.arr[self.size - 1] _UpperCamelCase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(_A ) self._heapify_down(_A ) def UpperCamelCase_ ( self : List[Any] , _A : int , _A : int ): _UpperCamelCase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(_A )] ) else: _UpperCamelCase = [item, self.key(_A )] _UpperCamelCase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def UpperCamelCase_ ( self : str ): return self.arr[0] if self.size else None def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def _snake_case ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
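# Hedged sketch of the array-heap index arithmetic the class above relies on
# (its score function defaults to the identity, i.e. lambda x: x). Helper
# names are illustrative.
def parent_index(i):
    return (i - 1) // 2 if i > 0 else None  # root has no parent

def left_child(i, size):
    left = 2 * i + 1
    return left if 0 < left < size else None

def right_child(i, size):
    right = 2 * i + 2
    return right if 0 < right < size else None

assert parent_index(0) is None and parent_index(4) == 1
assert left_child(1, 7) == 3 and right_child(1, 7) == 4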
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=99 , _A : Optional[Any]=13 , _A : List[str]=7 , _A : List[Any]=9 , _A : Dict=True , _A : str=True , _A : List[Any]=False , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : List[str]=4 , _A : Tuple=37 , _A : Optional[int]=8 , _A : Optional[int]=0.1 , _A : Union[str, Any]=0.002 , _A : Optional[Any]=1 , _A : List[Any]=0 , _A : List[str]=0 , _A : Dict=None , _A : Dict=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = encoder_seq_length _UpperCamelCase = decoder_seq_length # For common tests _UpperCamelCase = self.decoder_seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = d_ff _UpperCamelCase = relative_attention_num_buckets _UpperCamelCase = dropout_rate _UpperCamelCase = initializer_factor _UpperCamelCase = eos_token_id _UpperCamelCase = pad_token_id _UpperCamelCase = decoder_start_token_id _UpperCamelCase = None _UpperCamelCase = decoder_layers def UpperCamelCase_ ( self : Optional[int] ): return TaConfig.from_pretrained('''google/umt5-base''' ) def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : Dict , _A : int , _A : str=None , _A : Dict=None , _A : List[str]=None , _A : Any=None , _A : str=None , ): if attention_mask is None: _UpperCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _UpperCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _UpperCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_A ) if decoder_head_mask is None: _UpperCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_A ) if cross_attn_head_mask is None: _UpperCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_A ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _UpperCamelCase = 
input_ids.clamp(self.pad_token_id + 1 ) _UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) _UpperCamelCase = self.get_config() _UpperCamelCase = config.num_attention_heads _UpperCamelCase = self.prepare_inputs_dict(_A , _A , _A ) return config, input_dict def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase_ ( self : Optional[int] ): return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCamelCase_ ( self : int ): return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCamelCase_ ( self : str , _A : List[str] , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Any , _A : Optional[int] , ): _UpperCamelCase = UMTaModel(config=_A ) model.to(_A ) model.eval() _UpperCamelCase = model( input_ids=_A , decoder_input_ids=_A , attention_mask=_A , decoder_attention_mask=_A , ) _UpperCamelCase = model(input_ids=_A , decoder_input_ids=_A ) _UpperCamelCase = result.last_hidden_state _UpperCamelCase = result.past_key_values _UpperCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_A ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def UpperCamelCase_ ( self : List[Any] , _A : int , _A : str , _A : List[str] , _A : List[Any] , _A : Any , _A : Optional[Any] , ): _UpperCamelCase = UMTaModel(config=_A ).get_decoder().to(_A ).eval() # first forward pass _UpperCamelCase = model(_A , use_cache=_A ) _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A , use_cache=_A ) self.parent.assertTrue(len(_A ) == len(_A ) ) self.parent.assertTrue(len(_A ) == len(_A ) + 1 ) _UpperCamelCase , _UpperCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCamelCase = model(_A )['''last_hidden_state'''] _UpperCamelCase = model(_A , past_key_values=_A )['''last_hidden_state'''] # select random slice _UpperCamelCase = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() _UpperCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() _UpperCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) ) def UpperCamelCase_ ( self : Optional[int] , _A : List[Any] , _A : Any , ): _UpperCamelCase = UMTaModel(config=_A ).to(_A ).half().eval() _UpperCamelCase = model(**_A )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(_A ).any().item() ) @require_torch class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) UpperCAmelCase = (UMTaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase = ( { "conversational": UMTaForConditionalGeneration, "feature-extraction": UMTaModel, "summarization": UMTaForConditionalGeneration, "text2text-generation": UMTaForConditionalGeneration, "translation": UMTaForConditionalGeneration, "question-answering": UMTaForQuestionAnswering, } if is_torch_available() else {} ) UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = True UpperCAmelCase = True # The small UMT5 model needs higher percentages for CPU/MP tests UpperCAmelCase = [0.8, 0.9] def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() _UpperCamelCase = UMTaModel(config_and_inputs[0] ).to(_A ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=_A , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _UpperCamelCase = self.model_tester.prepare_config_and_inputs() _UpperCamelCase = config_and_inputs[0] _UpperCamelCase = UMTaForConditionalGeneration(_A ).eval() model.to(_A ) _UpperCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=_A ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A ), } for attn_name, (name, mask) in zip(_A , head_masking.items() ): _UpperCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _UpperCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=_A ) _UpperCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=_A , return_dict_in_generate=_A , **_A , ) # We check the state of decoder_attentions and cross_attentions just from the last step _UpperCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not 
work on the tiny model as we keep hitting edge cases.''' ) def UpperCamelCase_ ( self : List[str] ): pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=_A ).to(_A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=_A , legacy=_A ) _UpperCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _UpperCamelCase = tokenizer(_A , return_tensors='''pt''' , padding=_A ).input_ids # fmt: off _UpperCamelCase = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(_A , _A ) _UpperCamelCase = model.generate(input_ids.to(_A ) ) _UpperCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertEqual(_A , _A )
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# the same check written with Python's built-in exponentiation:
print((a / b) % p == (a * b ** (p - 2)) % p)
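# Integer-only sanity check of the Fermat-inverse trick demonstrated above; a
# minimal sketch assuming the binary_exponentiation function from this file.
# Because b divides a and p is prime with gcd(b, p) = 1, multiplying by
# b^(p-2) mod p is the same as dividing by b before reducing mod p.
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1  # Fermat's little theorem: b * b^(p-2) ≡ 1 (mod p)
assert (a * inv_b) % p == (a // b) % p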
import math


def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
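# Worked check (assuming the solution() above): for n = 10 the square of the
# sum is 55**2 = 3025 and the sum of squares is 385, so the difference is 2640.
assert solution(10) == 2640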
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


# Second-order (biquad) IIR filter constructors; each returns a configured
# IIRFilter built from the standard biquad coefficient formulas.
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
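# Minimal usage sketch. It assumes IIRFilter (from audio_filters.iir_filter)
# exposes a per-sample process() method, as the sibling module in this
# collection does; the filter parameters here are illustrative only.
if __name__ == "__main__":
    samplerate = 48_000
    lowpass = make_lowpass(frequency=1_000, samplerate=samplerate)

    # Feed a short ramp through the filter one sample at a time.
    signal = [i / samplerate for i in range(8)]
    print([lowpass.process(sample) for sample in signal])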
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
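# The include/exclude recursion above prints one line per r-subset, so the
# number of printed combinations equals C(n, r); a quick check for the driver
# input (5 elements, r = 3):
from math import comb

assert comb(5, 3) == 10  # print_combination prints exactly 10 lines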
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
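# Minimal sketch of the rope_scaling validation above (assuming the
# GPTNeoXConfig reconstructed in this file): a well-formed dict passes, while
# an unknown scaling type raises the ValueError from _rope_scaling_validation.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)

try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)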
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = WavaVecaPhonemeCTCTokenizer UpperCAmelCase = False def UpperCamelCase_ ( self : str ): super().setUp() _UpperCamelCase = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) _UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) ) _UpperCamelCase = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any]=False , _A : List[str]=20 , _A : Optional[Any]=5 ): _UpperCamelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_A )) for i in range(len(_A ) )] _UpperCamelCase = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: _UpperCamelCase = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: _UpperCamelCase = toks + toks # toks_str = [t[1] for t in toks] _UpperCamelCase = [t[0] for t in toks] # Ensure consistency _UpperCamelCase = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: _UpperCamelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: _UpperCamelCase = ''' ''' + output_txt _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def UpperCamelCase_ ( self : Dict , **_A : int ): kwargs.update(self.special_tokens_map ) return 
WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) _UpperCamelCase = tokenizer('''m xxx ɪ''' , do_phonemize=_A ).input_ids self.assertEqual(_A , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) _UpperCamelCase = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_A ).input_ids self.assertEqual(_A , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa _UpperCamelCase = tokenizer('''maɪ c''' , do_phonemize=_A ).input_ids self.assertEqual(_A , [3, 200] ) # mai should be <unk> (=3) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) _UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _UpperCamelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] _UpperCamelCase = tokenizer.decode(sample_ids[0] ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertEqual(_A , batch_tokens[0] ) self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) self.assertEqual(_A , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off _UpperCamelCase = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter _UpperCamelCase = tokenizer.decode(sample_ids[0] ) 
_UpperCamelCase = tokenizer.batch_decode(_A ) self.assertEqual(_A , batch_tokens[0] ) self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter _UpperCamelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_A ) _UpperCamelCase = tokenizer.batch_decode(_A , filter_word_delimiter_token=_A ) self.assertEqual(_A , batch_tokens[0] ) self.assertEqual(_A , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) _UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' ) _UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_A ) _UpperCamelCase = '''Hello how are you''' _UpperCamelCase = tokenizer(_A , phonemizer_lang='''en-us''' ).input_ids _UpperCamelCase = tokenizer(_A , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(_A , _A ) _UpperCamelCase = tokenizer.decode(_A ) _UpperCamelCase = tokenizer.decode(_A ) self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(_A , '''ɛ l o h aʊ a ʁ j u''' ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) _UpperCamelCase = '''Hello how Are you''' _UpperCamelCase = '''hello how are you''' _UpperCamelCase = tokenizer(_A ).input_ids _UpperCamelCase = tokenizer(_A ).input_ids self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off _UpperCamelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ!?!? 
$$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def UpperCamelCase_ ( _A : Optional[int] , _A : str ): _UpperCamelCase = [d[key] for d in offsets] return retrieved_list def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" _UpperCamelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on _UpperCamelCase = tokenizer.decode(_A , output_char_offsets=_A , filter_word_delimiter_token=_A ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(_A : List[str] , _A : List[str] ): self.assertTrue(isinstance(_A , _A ) ) self.assertTrue(isinstance(outputs_list[0] , _A ) ) # transform list to ModelOutput _UpperCamelCase = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(_A : Tuple , _A : Any ): if isinstance(_A , _A ): [recursive_check(_A , _A ) for la, la in zip(_A , _A )] self.assertEqual(_A , _A ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off _UpperCamelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char _UpperCamelCase = tokenizer.batch_decode(_A , output_char_offsets=_A ) _UpperCamelCase = [tokenizer.decode(_A , output_char_offsets=_A ) for ids in sample_ids] check_list_tuples_equal(_A , _A ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def UpperCamelCase_ ( self : Tuple ): pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def UpperCamelCase_ ( self : Dict ): pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def UpperCamelCase_ ( self : str ): pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase = tokenizer.vocab_size _UpperCamelCase = len(_A ) self.assertNotEqual(_A , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) _UpperCamelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] _UpperCamelCase = tokenizer.add_tokens(_A ) _UpperCamelCase = tokenizer.vocab_size _UpperCamelCase = len(_A ) self.assertNotEqual(_A , 0 ) self.assertEqual(_A , _A ) self.assertEqual(_A , len(_A ) ) self.assertEqual(_A , all_size + len(_A ) ) _UpperCamelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_A ) self.assertGreaterEqual(len(_A ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) _UpperCamelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} _UpperCamelCase = tokenizer.add_special_tokens(_A ) _UpperCamelCase = tokenizer.vocab_size _UpperCamelCase = len(_A ) self.assertNotEqual(_A , 0 ) self.assertEqual(_A , _A ) self.assertEqual(_A , len(_A ) ) self.assertEqual(_A , all_size_a + len(_A ) ) _UpperCamelCase = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_A ) self.assertGreaterEqual(len(_A ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def UpperCamelCase_ ( self : str ): pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def UpperCamelCase_ ( self : Union[str, Any] ): pass def UpperCamelCase_ ( self : Any ): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. 
_UpperCamelCase = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _UpperCamelCase = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] _UpperCamelCase = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(output['''text'''] , _A )
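# Hedged sketch of the behaviour these tests assert (requires the phonemizer
# backend; the checkpoint name is taken from the tests above, and the
# obfuscated WavaVeca* names correspond to transformers' Wav2Vec2* classes).
from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
print(phonemes)  # per the test above: "h ə l oʊ h aʊ ɑːɹ j uː"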
from ..utils import DummyObject, requires_backends


class lowerCAmelCase_(metaclass=DummyObject):
    # Import-time placeholder: any use raises an informative error unless the
    # `keras_nlp` backend is installed.
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __snake_case = sys.version_info >= (3, 10) def _snake_case ( __snake_case=None , __snake_case=None ): return field(default_factory=lambda: default , metadata=__snake_case ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = 42 UpperCAmelCase = 42 UpperCAmelCase = 42 UpperCAmelCase = 42 @dataclass class lowerCAmelCase_ : UpperCAmelCase = 42 UpperCAmelCase = field(default="toto", metadata={"help": "help message"} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = False UpperCAmelCase = True UpperCAmelCase = None class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "titi" UpperCAmelCase = "toto" class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "titi" UpperCAmelCase = "toto" UpperCAmelCase = 42 @dataclass class lowerCAmelCase_ : UpperCAmelCase = "toto" def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = BasicEnum(self.foo ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = "toto" def UpperCamelCase_ ( self : int ): _UpperCamelCase = MixedTypeEnum(self.foo ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = None UpperCAmelCase = field(default=__lowercase, metadata={"help": "help message"} ) UpperCAmelCase = None UpperCAmelCase = list_field(default=[] ) UpperCAmelCase = list_field(default=[] ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = list_field(default=[] ) UpperCAmelCase = list_field(default=[1, 2, 3] ) UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] ) UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field() UpperCAmelCase = field() UpperCAmelCase = field() def UpperCamelCase_ ( self : int ): _UpperCamelCase = BasicEnum(self.required_enum ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = 42 UpperCAmelCase = field() UpperCAmelCase = None UpperCAmelCase = field(default="toto", metadata={"help": "help message"} ) UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class lowerCAmelCase_ : UpperCAmelCase = False UpperCAmelCase = True UpperCAmelCase = None @dataclass class lowerCAmelCase_ : UpperCAmelCase = None UpperCAmelCase = field(default=__lowercase, metadata={"help": "help message"} ) UpperCAmelCase = None UpperCAmelCase = list_field(default=[] ) UpperCAmelCase = list_field(default=[] ) class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Tuple , _A : argparse.ArgumentParser , _A : argparse.ArgumentParser ): self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCamelCase = {k: v for k, v in vars(_A ).items() if k != '''container'''} _UpperCamelCase = {k: v for k, v in vars(_A ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _A ) and yy.get('''choices''' , _A ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_A ) , yy['''type'''](_A ) ) del xx["type"], yy["type"] self.assertEqual(_A , _A ) def 
UpperCamelCase_ ( self : str ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_A , required=_A ) expected.add_argument('''--bar''' , type=_A , required=_A ) expected.add_argument('''--baz''' , type=_A , required=_A ) expected.add_argument('''--flag''' , type=_A , default=_A , const=_A , nargs='''?''' ) self.argparsersEqual(_A , _A ) _UpperCamelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCamelCase ) , ) = parser.parse_args_into_dataclasses(_A , look_for_args_file=_A ) self.assertFalse(example.flag ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_A ) expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' ) self.argparsersEqual(_A , _A ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_A , default=_A , const=_A , nargs='''?''' ) expected.add_argument('''--baz''' , type=_A , default=_A , const=_A , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_A , dest='''baz''' ) expected.add_argument('''--opt''' , type=_A , default=_A ) _UpperCamelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_A ) for dataclass_type in dataclass_types: _UpperCamelCase = HfArgumentParser(_A ) self.argparsersEqual(_A , _A ) _UpperCamelCase = parser.parse_args([] ) self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) ) _UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) ) _UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) ) _UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) ) _UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_A , _A ) _UpperCamelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCamelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) _UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def UpperCamelCase_ ( self : str ): @dataclass class lowerCAmelCase_ : UpperCAmelCase = "toto" _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = 
argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_A , _A ) _UpperCamelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_A ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_A ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_A ) self.argparsersEqual(_A , _A ) _UpperCamelCase = parser.parse_args([] ) self.assertEqual( _A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_A , type=_A ) expected.add_argument('''--bar''' , default=_A , type=_A , help='''help message''' ) expected.add_argument('''--baz''' , default=_A , type=_A ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_A ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_A ) _UpperCamelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_A ) for dataclass_type in dataclass_types: _UpperCamelCase = HfArgumentParser(_A ) self.argparsersEqual(_A , _A ) _UpperCamelCase = parser.parse_args([] ) self.assertEqual(_A , Namespace(foo=_A , bar=_A , baz=_A , ces=[] , des=[] ) ) _UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_A , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_A , required=_A ) expected.add_argument('''--required_str''' , type=_A , required=_A ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , ) self.argparsersEqual(_A , _A ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_A , required=_A ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , ) expected.add_argument('''--opt''' , type=_A , default=_A ) expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A ) 
self.argparsersEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } _UpperCamelCase = parser.parse_dict(_A )[0] _UpperCamelCase = BasicExample(**_A ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_A , parser.parse_dict , _A , allow_extra_keys=_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(_A , '''temp_json''' ) os.mkdir(_A ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_A , _A ) _UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _UpperCamelCase = BasicExample(**_A ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = HfArgumentParser(_A ) _UpperCamelCase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(_A , '''temp_yaml''' ) os.mkdir(_A ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_A , _A ) _UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCamelCase = BasicExample(**_A ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = HfArgumentParser(_A ) self.assertIsNotNone(_A )
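# Minimal sketch of the HfArgumentParser pattern these tests exercise.
# RunArgs is a hypothetical dataclass used only for illustration.
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class RunArgs:
    foo: int
    bar: float = 0.5


parser = HfArgumentParser(RunArgs)
(run_args,) = parser.parse_args_into_dataclasses(["--foo", "12", "--bar", "3.14"])
print(run_args.foo, run_args.bar)  # 12 3.14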
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
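# Hedged usage sketch: the obfuscated TFRegNet* layers above mirror
# transformers' TF RegNet implementation, so the public API would be used
# roughly as below (checkpoint name taken from the docstrings in this file;
# running this downloads weights).
import tensorflow as tf
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = tf.zeros((224, 224, 3), dtype=tf.uint8).numpy()  # dummy RGB image
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
print(outputs.logits.shape)  # (1, num_labels)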
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __SCREAMING_SNAKE_CASE : List[str] ='''0.12''' # assumed parallelism: 8 @require_flax @is_staging_test class A_ ( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ): lowercase = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] ): try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowercase = FlaxBertModel(snake_case__ ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) lowercase = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) lowercase = flatten_dict(unfreeze(model.params ) ) lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(snake_case__ , repo_id="""test-model-flax""" , push_to_hub=snake_case__ , use_auth_token=self._token ) lowercase = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" ) lowercase = flatten_dict(unfreeze(model.params ) ) lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowercase = FlaxBertModel(snake_case__ ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) lowercase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowercase = flatten_dict(unfreeze(model.params ) ) lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( snake_case__ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=snake_case__ , use_auth_token=self._token ) lowercase = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowercase = flatten_dict(unfreeze(model.params ) ) lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(snake_case__ , 1E-3 , msg=F"""{key} not identical""" ) def UpperCamelCase__ ( 
lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = True lowercase = flatten_dict(modela.params ) lowercase = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowercase = False return models_are_equal @require_flax class A_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowercase = FlaxBertModel(snake_case__ ) lowercase = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) ) with self.assertRaises(snake_case__ ): lowercase = FlaxBertModel.from_pretrained(snake_case__ ) lowercase = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ ) self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowercase = FlaxBertModel(snake_case__ ) lowercase = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(snake_case__ , snake_case__ ) , max_shard_size="""10KB""" ) with self.assertRaises(snake_case__ ): lowercase = FlaxBertModel.from_pretrained(snake_case__ ) lowercase = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ ) self.assertTrue(check_models_equal(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = """bert""" lowercase = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(snake_case__ ): lowercase = FlaxBertModel.from_pretrained(snake_case__ ) lowercase = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = """bert""" lowercase = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(snake_case__ ): lowercase = FlaxBertModel.from_pretrained(snake_case__ ) lowercase = FlaxBertModel.from_pretrained(snake_case__ , subfolder=snake_case__ ) self.assertIsNotNone(snake_case__ )
72
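The push-to-hub tests above repeat the same parameter comparison several times; below is a minimal runnable sketch of that pattern. The helper name `params_close` and the 1e-3 tolerance are illustrative (they mirror the asserts above, not a library API):

import numpy as np
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict


def params_close(params_a, params_b, tol=1e-3):
    # flatten both Flax parameter trees into {tuple_path: array} dicts
    flat_a = flatten_dict(unfreeze(params_a))
    flat_b = flatten_dict(unfreeze(params_b))
    if flat_a.keys() != flat_b.keys():
        return False
    # compare each leaf by its summed elementwise difference, as the tests do
    return all(abs(float((flat_a[k] - flat_b[k]).sum())) <= tol for k in flat_a)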
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig''' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat''' __SCREAMING_SNAKE_CASE : Tuple =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ): if drop_prob == 0.0 or not training: return input lowercase = 1 - drop_prob lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize lowercase = input.div(lowerCAmelCase__ ) * random_tensor return output class A_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ): super().__init__() lowercase = drop_prob def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ): return drop_path(snake_case__ , self.drop_prob , self.training ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return "p={}".format(self.drop_prob ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ): super().__init__() lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = self.projection(snake_case__ ) lowercase = self.norm(snake_case__ ) return embeddings class A_ ( nn.GroupNorm ): def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ): super().__init__(1 , snake_case__ , **snake_case__ ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any ): super().__init__() lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): return self.pool(snake_case__ ) - hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ): super().__init__() lowercase = 
nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase = ACTaFN[config.hidden_act] else: lowercase = config.hidden_act def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ): lowercase = self.conva(snake_case__ ) lowercase = self.act_fn(snake_case__ ) lowercase = self.drop(snake_case__ ) lowercase = self.conva(snake_case__ ) lowercase = self.drop(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ): super().__init__() lowercase = PoolFormerPooling(snake_case__ ) lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase = config.use_layer_scale if config.use_layer_scale: lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): if self.use_layer_scale: lowercase = self.pooling(self.before_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = () lowercase = self.output(self.after_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = (output,) + outputs return outputs else: lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase = pooling_output + hidden_states lowercase = () # Second residual connection inside the PoolFormerOutput block lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase = hidden_states + layer_output lowercase = (output,) + outputs return outputs class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[Any] ): super().__init__() lowercase = config # stochastic depth decay rule lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase = nn.ModuleList(snake_case__ ) # Transformer blocks lowercase = [] lowercase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) 
lowercase = nn.ModuleList(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ): lowercase = () if output_hidden_states else None lowercase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase , lowercase = layers # Get patch embeddings from hidden_states lowercase = embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase = blk(snake_case__ ) lowercase = layer_outputs[0] if output_hidden_states: lowercase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class A_ ( __a ): _A :Any = PoolFormerConfig _A :int = '''poolformer''' _A :Union[str, Any] = '''pixel_values''' _A :str = True def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Optional[Any] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , ) class A_ ( __a ): def __init__( self : Union[str, Any] , snake_case__ : int ): super().__init__(snake_case__ ) lowercase = config lowercase = PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) lowercase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[int] ): super().__init__() lowercase = nn.Linear(config.hidden_size , config.hidden_size ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.dense(snake_case__ ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __a , ) class A_ ( __a ): def __init__( self : Dict , snake_case__ : Any ): super().__init__(snake_case__ ) lowercase = config.num_labels lowercase = PoolFormerModel(snake_case__ ) # Final norm lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = outputs[0] lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase = """single_label_classification""" else: lowercase = """multi_label_classification""" if self.config.problem_type == "regression": lowercase = MSELoss() if self.num_labels == 1: lowercase = loss_fct(logits.squeeze() , 
labels.squeeze() ) else: lowercase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase = CrossEntropyLoss() lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase = BCEWithLogitsLoss() lowercase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
72
1
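The `drop_path` function at the top of the PoolFormer file implements stochastic depth. A self-contained sketch of the same trick, to make the rescaling step explicit:

import torch


def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # During training, zero each sample's residual branch with probability
    # drop_prob; rescale survivors by 1/keep_prob so expectations match eval.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # one Bernoulli draw per sample
    mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize to {0., 1.}
    return x.div(keep_prob) * mask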
__SCREAMING_SNAKE_CASE : Optional[Any] ={
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
72
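A dependency table like the one above is typically consumed by a setup script that looks up pinned specifiers by package name. A hypothetical sketch of that pattern (the `deps_list` helper is illustrative, not taken from the source):

deps = {"torch": "torch>=1.4", "numpy": "numpy", "safetensors": "safetensors"}  # excerpt


def deps_list(*pkgs):
    # map bare package names to their pinned requirement strings
    return [deps[pkg] for pkg in pkgs]


install_requires = deps_list("numpy", "torch")  # ["numpy", "torch>=1.4"]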
from numpy import exp, pi, sqrt


def gaussian(x, mu=0.0, sigma=1.0):
    # probability density of a normal distribution with mean mu and std sigma
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
1
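A quick numeric check of the density above: the pdf peaks at x = mu with value 1 / sqrt(2 * pi * sigma**2), which for the standard normal is about 0.3989:

from numpy import pi, sqrt

print(round(float(1 / sqrt(2 * pi)), 4))  # 0.3989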
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
72
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A_ : _A :int _A :int class A_ : def __init__( self : List[str] , snake_case__ : int ): lowercase = [[] for _ in range(snake_case__ )] lowercase = size def __getitem__( self : Optional[int] , snake_case__ : int ): return iter(self._graph[vertex] ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): return self._size def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int ): lowercase = deque([start_vertex] ) lowercase = [None] * self.size lowercase = 0 while queue: lowercase = queue.popleft() lowercase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowercase = current_distance + edge.weight lowercase = distances[edge.destination_vertex] if ( isinstance(snake_case__ , snake_case__ ) and new_distance >= dest_vertex_distance ): continue lowercase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
72
1
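The graph class above implements 0-1 BFS: weight-0 edges go to the front of the deque and weight-1 edges to the back, which yields shortest paths in O(V + E) without a priority queue. A self-contained sketch of the same idea on a plain adjacency list:

from collections import deque


def zero_one_bfs(adj, start, goal):
    # adj maps each vertex to [(neighbor, weight)] with weights in {0, 1}
    dist = {v: None for v in adj}
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)  # free edge: same BFS layer, front of deque
                else:
                    dq.append(v)      # unit edge: next layer, back of deque
    return dist[goal]


print(zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 1)], 2: []}, 0, 2))  # 1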
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict ='''https://openaipublic.azureedge.net/jukebox/models/''' __SCREAMING_SNAKE_CASE : Tuple ={ '''jukebox-1b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''1b_lyrics/prior_level_2.pth.tar''', ], '''jukebox-5b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''5b_lyrics/prior_level_2.pth.tar''', ], } def UpperCamelCase__ ( lowerCAmelCase__ ): if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10: lowercase = key.replace(""".model.1.bias""" ,""".conv1d_1.bias""" ) elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10: lowercase = key.replace(""".model.1.weight""" ,""".conv1d_1.weight""" ) elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10: lowercase = key.replace(""".model.3.bias""" ,""".conv1d_2.bias""" ) elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10: lowercase = key.replace(""".model.3.weight""" ,""".conv1d_2.weight""" ) if "conditioner_blocks.0." in key: lowercase = key.replace("""conditioner_blocks.0""" ,"""conditioner_blocks""" ) if "prime_prior" in key: lowercase = key.replace("""prime_prior""" ,"""encoder""" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowercase = key.replace(""".emb.""" ,""".""" ) if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(""".k""" ,""".codebook""" ) if "y_emb." in key: return key.replace("""y_emb.""" ,"""metadata_embedding.""" ) if "x_emb.emb." 
in key: lowercase = key.replace("""0.x_emb.emb""" ,"""embed_tokens""" ) if "prime_state_ln" in key: return key.replace("""prime_state_ln""" ,"""encoder.final_layer_norm""" ) if ".ln" in key: return key.replace(""".ln""" ,""".layer_norm""" ) if "_ln" in key: return key.replace("""_ln""" ,"""_layer_norm""" ) if "prime_state_proj" in key: return key.replace("""prime_state_proj""" ,"""encoder.proj_in""" ) if "prime_x_out" in key: return key.replace("""prime_x_out""" ,"""encoder.lm_head""" ) if "prior.x_out" in key: return key.replace("""x_out""" ,"""fc_proj_out""" ) if "x_emb" in key: return key.replace("""x_emb""" ,"""embed_tokens""" ) return key def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = {} import re lowercase = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) lowercase = re.compile( r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) lowercase = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) lowercase = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) lowercase = re.compile( r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) lowercase = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) lowercase = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" ) lowercase = re.compile( r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) lowercase = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(lowerCAmelCase__ ): lowercase = re_encoder_block_conv_in.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}""" lowercase = re_encoder_block_conv_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_encoder_block_resnet.fullmatch(lowerCAmelCase__ ): lowercase = re_encoder_block_resnet.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) lowercase = {"""1""": 1, """3""": 2}[groups[-2]] lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.""" lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowercase = prefix + resnet_block lowercase = re_encoder_block_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase__ ): lowercase = re_encoder_block_proj_out.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}""" lowercase = re_encoder_block_proj_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase__ ): lowercase = re_decoder_block_conv_out.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}""" lowercase = re_decoder_block_conv_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_decoder_block_resnet.fullmatch(lowerCAmelCase__ ): lowercase = 
re_decoder_block_resnet.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase = {"""1""": 1, """3""": 2}[groups[-2]] lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.""" lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowercase = prefix + resnet_block lowercase = re_decoder_block_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase__ ): lowercase = re_decoder_block_proj_in.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}""" lowercase = re_decoder_block_proj_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase__ ): lowercase = re_prior_cond_conv_out.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}""" lowercase = re_prior_cond_conv_out.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_prior_cond_resnet.fullmatch(lowerCAmelCase__ ): lowercase = re_prior_cond_resnet.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase = {"""1""": 1, """3""": 2}[groups[-2]] lowercase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.""" lowercase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}""" lowercase = prefix + resnet_block lowercase = re_prior_cond_resnet.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase__ ): lowercase = re_prior_cond_proj_in.match(lowerCAmelCase__ ) lowercase = regex_match.groups() lowercase = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}""" lowercase = re_prior_cond_proj_in.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) # keep original key else: lowercase = original_key lowercase = replace_key(lowerCAmelCase__ ) if f"""{key_prefix}.{key}""" not in model_state_dict or key is None: print(f"""failed converting {original_key} to {key}, does not match""" ) # handle missmatched shape elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape: lowercase = model_state_dict[f"""{key_prefix}.{key}"""] print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" ) lowercase = original_key lowercase = original_key lowercase = value return new_dict @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase__=None ,lowerCAmelCase__=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ): lowercase = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=lowerCAmelCase__ ) os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=lowerCAmelCase__ ) open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ,"""wb""" ).write(r.content ) lowercase = MODEL_MAPPING[model_name.split("""/""" )[-1]] lowercase = JukeboxConfig.from_pretrained(lowerCAmelCase__ ) lowercase = JukeboxModel(lowerCAmelCase__ ) lowercase = [] lowercase = {} for i, dict_name in enumerate(lowerCAmelCase__ ): lowercase = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""] lowercase = {} for k in old_dic.keys(): if k.endswith(""".b""" ): lowercase = old_dic[k] elif k.endswith(""".w""" ): lowercase = old_dic[k] elif 
"level_2" not in dict_name and "cond.model." in k: lowercase = old_dic[k] else: lowercase = old_dic[k] lowercase = """vqvae""" if i == 0 else f"""priors.{3 - i}""" lowercase = fix_jukebox_keys(lowerCAmelCase__ ,model.state_dict() ,lowerCAmelCase__ ,lowerCAmelCase__ ) weight_dict.append(lowerCAmelCase__ ) lowercase = weight_dict.pop(0 ) model.vqvae.load_state_dict(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) with open(f"""{pytorch_dump_folder_path}/mapping.json""" ,"""w""" ) as txtfile: json.dump(lowerCAmelCase__ ,lowerCAmelCase__ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase__ ) return weight_dict if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''jukebox-5b-lyrics''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''jukebox-5b-lyrics-converted''', type=str, help='''Path to the output PyTorch model directory.''', ) __SCREAMING_SNAKE_CASE : List[Any] =parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
72
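The checkpoint conversion above is driven by regex rewrites: match an old key, extract the block indices, and rebuild the new name. A minimal runnable sketch of one such rule (the `encoders... -> downsample_block` rewrite; unmatched keys pass through):

import re

re_conv_in = re.compile(
    r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)"
)


def rename(key):
    m = re_conv_in.fullmatch(key)
    if m is None:
        return key  # keep keys this rule does not recognize
    enc, lvl, outer, inner, kind = m.groups()
    block_index = int(outer) * 2 + int(inner)
    return f"encoders.{enc}.level_blocks.{lvl}.downsample_block.{block_index}.{kind}"


print(rename("encoders.0.level_blocks.1.model.2.1.weight"))
# encoders.0.level_blocks.1.downsample_block.5.weight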
import math from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str ={ '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class A_ ( __a ): _A :Tuple = '''data2vec-audio''' def __init__( self : Optional[Any] , snake_case__ : List[Any]=32 , snake_case__ : List[Any]=7_68 , snake_case__ : int=12 , snake_case__ : Dict=12 , snake_case__ : List[str]=30_72 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Tuple=0.0 , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-5 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : str=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : Any=False , snake_case__ : List[str]=16 , snake_case__ : Any=19 , snake_case__ : Optional[Any]=5 , snake_case__ : str=0.05 , snake_case__ : Tuple=10 , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=0.0 , snake_case__ : int=10 , snake_case__ : Any=0 , snake_case__ : int="sum" , snake_case__ : str=False , snake_case__ : str=False , snake_case__ : Optional[int]=2_56 , snake_case__ : List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , snake_case__ : List[str]=(5, 3, 3, 1, 1) , snake_case__ : int=(1, 2, 3, 1, 1) , snake_case__ : Optional[Any]=5_12 , snake_case__ : Dict=0 , snake_case__ : Optional[Any]=1 , snake_case__ : Tuple=2 , snake_case__ : Tuple=False , snake_case__ : List[str]=3 , snake_case__ : List[str]=2 , snake_case__ : Tuple=3 , snake_case__ : List[str]=None , **snake_case__ : str , ): super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) lowercase = hidden_size lowercase = feat_extract_activation lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = conv_bias lowercase = num_conv_pos_embeddings lowercase = num_conv_pos_embedding_groups lowercase = conv_pos_kernel_size lowercase = len(self.conv_dim ) lowercase = num_hidden_layers lowercase = intermediate_size lowercase = hidden_act lowercase = num_attention_heads lowercase = hidden_dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = feat_proj_dropout lowercase = final_dropout lowercase = layerdrop lowercase = layer_norm_eps lowercase = initializer_range lowercase = vocab_size lowercase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase = mask_time_prob lowercase = mask_time_length lowercase = mask_time_min_masks lowercase = mask_feature_prob lowercase = mask_feature_length lowercase = mask_feature_min_masks # ctc loss lowercase = ctc_loss_reduction lowercase = ctc_zero_infinity # adapter lowercase = add_adapter lowercase = adapter_kernel_size lowercase = adapter_stride lowercase = num_adapter_layers lowercase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = xvector_output_dim @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return math.prod(self.conv_stride )
72
1
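The property at the end of the config returns math.prod(self.conv_stride): the feature encoder downsamples raw audio by the product of its conv strides. With the defaults above:

import math

strides = (5, 2, 2, 2, 2, 2, 2)  # default conv strides from the config
print(math.prod(strides))        # 320 input samples per output frame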
__SCREAMING_SNAKE_CASE : List[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/''' def UpperCamelCase__ ( lowerCAmelCase__ ): # Make sure the supplied data is a bytes-like object if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(lowerCAmelCase__ ) lowercase = """""".join(bin(lowerCAmelCase__ )[2:].zfill(8 ) for byte in data ) lowercase = len(lowerCAmelCase__ ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase = b"""=""" * ((6 - len(lowerCAmelCase__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(lowerCAmelCase__ ) % 6) else: lowercase = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] ,2 )] for index in range(0 ,len(lowerCAmelCase__ ) ,6 ) ).encode() + padding ) def UpperCamelCase__ ( lowerCAmelCase__ ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) and not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = ( """argument should be a bytes-like object or ASCII string, """ f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(lowerCAmelCase__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): try: lowercase = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowercase = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(lowerCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase = encoded_data[:-padding] lowercase = """""".join( bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase = """""".join( bin(B64_CHARSET.index(lowerCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data ) lowercase = [ int(binary_stream[index : index + 8] ,2 ) for index in range(0 ,len(lowerCAmelCase__ ) ,8 ) ] return bytes(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
72
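The encoder above works by regrouping the input's bits into 6-bit chunks and indexing a 64-character alphabet. Shown here on two bytes, reproducing base64.b64encode(b"Ma") == b"TWE=":

B64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
bits = "".join(bin(b)[2:].zfill(8) for b in b"Ma")       # 16 bits
bits += "0" * (6 - len(bits) % 6)                        # pad to a multiple of 6
chunks = [bits[i:i + 6] for i in range(0, len(bits), 6)]
print("".join(B64[int(c, 2)] for c in chunks) + "=")     # TWE=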
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [ """decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase , lowercase = emb.weight.shape lowercase = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ ) lowercase = emb.weight.data return lin_layer def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = torch.load(lowerCAmelCase__ ,map_location="""cpu""" ) lowercase = Namespace(**checkpoint["""cfg"""]["""model"""] ) lowercase = checkpoint["""model"""] remove_ignore_keys_(lowerCAmelCase__ ) lowercase = state_dict["""decoder.embed_tokens.weight"""].shape[0] lowercase = {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()} lowercase = XGLMConfig( vocab_size=lowerCAmelCase__ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,) lowercase = XGLMForCausalLM(lowerCAmelCase__ ) lowercase = model.load_state_dict(lowerCAmelCase__ ,strict=lowerCAmelCase__ ) print(lowerCAmelCase__ ) lowercase = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') __SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args() __SCREAMING_SNAKE_CASE : Optional[int] =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
72
1
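`make_linear_from_emb` above ties the LM head to the input embeddings: a bias-free Linear whose weight is taken from the embedding matrix. A tiny standalone demonstration:

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data   # tie: both now use one (10, 4) matrix
x = emb(torch.tensor([3]))
print(lm_head(x).shape)                 # torch.Size([1, 10])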
import copy import re class A_ : _A :Optional[int] = '''hp''' _A :Union[str, Any] = {} _A :List[Any] = None @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] ): lowercase = prefix lowercase = defaults cls.build_naming_info() @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Union[str, Any] , snake_case__ : Tuple ): if len(snake_case__ ) == 0: return "" lowercase = None if any(char.isdigit() for char in word ): raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(snake_case__ ) + 1 ): lowercase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowercase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(snake_case__ : Optional[int] ): lowercase = """""" while integer != 0: lowercase = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s lowercase = 0 while True: lowercase = word + """#""" + int_to_alphabetic(snake_case__ ) if sword in info["reverse_short_word"]: continue else: lowercase = sword break lowercase = short_word lowercase = word return short_word @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Tuple , snake_case__ : Dict ): lowercase = param_name.split("""_""" ) lowercase = [TrialShortNamer.shortname_for_word(snake_case__ , snake_case__ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowercase = ["""""", """_"""] for separator in separators: lowercase = separator.join(snake_case__ ) if shortname not in info["reverse_short_param"]: lowercase = shortname lowercase = param_name return shortname return param_name @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Tuple , snake_case__ : List[Any] ): lowercase = TrialShortNamer.shortname_for_key(snake_case__ , snake_case__ ) lowercase = short_name lowercase = param_name @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict ): if cls.NAMING_INFO is not None: return lowercase = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } lowercase = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(snake_case__ , snake_case__ ) lowercase = info @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , snake_case__ : Optional[int] ): cls.build_naming_info() assert cls.PREFIX is not None lowercase = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowercase = cls.NAMING_INFO["""short_param"""][k] if isinstance(snake_case__ , snake_case__ ): lowercase = 1 if v else 0 lowercase = """""" if isinstance(snake_case__ , (int, float) ) else """-""" lowercase = F"""{key}{sep}{v}""" name.append(snake_case__ ) return "_".join(snake_case__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , snake_case__ : Any ): lowercase = repr[len(cls.PREFIX ) + 1 :] if repr == "": lowercase = [] else: lowercase = repr.split("""_""" ) lowercase = {} for value in values: if "-" in value: lowercase , lowercase = value.split("""-""" ) else: lowercase = re.sub("""[0-9.]""" , """""" , snake_case__ ) lowercase = float(re.sub("""[^0-9.]""" , """""" , snake_case__ ) ) lowercase = 
cls.NAMING_INFO["""reverse_short_param"""][p_k] lowercase = p_v for k in cls.DEFAULTS: if k not in parameters: lowercase = cls.DEFAULTS[k] return parameters
72
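A hypothetical usage sketch for the short-namer above, assuming the obfuscated method names map back to the originals (`shortname` to build a name, `parse_repr` to invert it); the subclass and values are illustrative:

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}


# only values differing from DEFAULTS appear in the name, each key abbreviated
# to a collision-free short form, e.g. something like "run_lr0.0001"
name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
params = RunNamer.parse_repr(name)  # round-trips back to the full dict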
from __future__ import annotations import bisect def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_right(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left <= right: lowercase = left + (right - left) // 2 lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase = midpoint - 1 else: lowercase = midpoint + 1 return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = bisect.bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ) if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if right < left: return None lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint + 1 ,lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =input('''Enter numbers separated by comma:\n''').strip() __SCREAMING_SNAKE_CASE : Tuple =sorted(int(item) for item in user_input.split(''',''')) __SCREAMING_SNAKE_CASE : Tuple =int(input('''Enter a single number to be found in the list:\n''')) __SCREAMING_SNAKE_CASE : Union[str, Any] =binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
72
1
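All of the helpers above rely on the bisection invariant: bisect_left returns the first index at which the item could be inserted, so membership reduces to one index check:

import bisect

data = [0, 5, 7, 10, 15]
i = bisect.bisect_left(data, 7)
print(i, i < len(data) and data[i] == 7)  # 2 True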
from ....utils import logging


__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)


class A_(__a):
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # adopt all attributes of the wrapped text config, then add the
        # multimodal-specific ones
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
72
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = multiprocessing.Manager() lowercase = manager.list() lowercase = multiprocessing.Process(target=lowerCAmelCase__ ,args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowercase = shutil.rmtree lowercase = os.rmdir lowercase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowercase = {} with swallow_io(): with time_limit(lowerCAmelCase__ ): exec(lowerCAmelCase__ ,lowerCAmelCase__ ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. lowercase = rmtree lowercase = rmdir lowercase = chdir @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): def signal_handler(lowerCAmelCase__ ,lowerCAmelCase__ ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL ,lowerCAmelCase__ ) signal.signal(signal.SIGALRM ,lowerCAmelCase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL ,0 ) @contextlib.contextmanager def UpperCamelCase__ ( ): lowercase = WriteOnlyStringIO() with contextlib.redirect_stdout(lowerCAmelCase__ ): with contextlib.redirect_stderr(lowerCAmelCase__ ): with redirect_stdin(lowerCAmelCase__ ): yield @contextlib.contextmanager def UpperCamelCase__ ( ): with tempfile.TemporaryDirectory() as dirname: with chdir(lowerCAmelCase__ ): yield dirname class A_ ( __a ): pass class A_ ( io.StringIO ): def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : int , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : Any ): return False class A_ ( contextlib._RedirectStream ): # type: ignore _A :List[Any] = '''stdin''' @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): if root == ".": yield return lowercase = os.getcwd() os.chdir(lowerCAmelCase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__=None ): if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins lowercase = None lowercase = None import os lowercase = """1""" lowercase = None lowercase = None lowercase = None 
lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None import shutil lowercase = None lowercase = None lowercase = None import subprocess lowercase = None # type: ignore lowercase = None import sys lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None
72
1
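The sandbox above combines several containment tricks; the core timeout uses SIGALRM. A minimal sketch of that pattern in isolation (Unix-only, since Windows lacks SIGALRM):

import signal
from contextlib import contextmanager


class TimeoutException(Exception):
    pass


@contextmanager
def time_limit(seconds):
    def handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)  # arm wall-clock timer
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)    # always disarm


try:
    with time_limit(0.1):
        while True:  # would spin forever without the alarm
            pass
except TimeoutException:
    print("interrupted")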
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : str =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict ={ '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” __SCREAMING_SNAKE_CASE : int =1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py __SCREAMING_SNAKE_CASE : Dict =0 __SCREAMING_SNAKE_CASE : Optional[Any] =0XE_0_0_0 __SCREAMING_SNAKE_CASE : Tuple =0XE_0_0_1 __SCREAMING_SNAKE_CASE : str =0XE_0_0_2 __SCREAMING_SNAKE_CASE : str =0XE_0_0_3 __SCREAMING_SNAKE_CASE : Tuple =0XE_0_0_4 # Maps special codepoints to human-readable names. __SCREAMING_SNAKE_CASE : Dict[int, str] ={ # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. __SCREAMING_SNAKE_CASE : Dict[str, int] ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A_ ( __a ): _A :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , snake_case__ : Optional[int]=chr(snake_case__ ) , snake_case__ : List[str]=chr(snake_case__ ) , snake_case__ : Tuple=chr(snake_case__ ) , snake_case__ : List[Any]=chr(snake_case__ ) , snake_case__ : int=chr(snake_case__ ) , snake_case__ : Union[str, Any]=chr(snake_case__ ) , snake_case__ : Tuple=False , snake_case__ : Optional[Any]=20_48 , **snake_case__ : Union[str, Any] , ): lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , model_max_length=snake_case__ , **snake_case__ , ) # Creates a mapping for looking up the IDs of special symbols. lowercase = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowercase = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowercase = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowercase = UNICODE_VOCAB_SIZE lowercase = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : str ): return list(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : str ): try: return ord(snake_case__ ) except TypeError: raise ValueError(F"""invalid token: '{token}'""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : int ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case__ ) except TypeError: raise ValueError(F"""invalid id: {index}""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : int ): return "".join(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) lowercase = [1] + ([0] * len(snake_case__ )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ): return ()
72
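The tokenizer above is character-level: every token is a single Unicode character and its id is the codepoint, with special tokens placed in the private-use area. A two-line illustration:

text = "hi"
print([ord(ch) for ch in text])  # [104, 105]
print(hex(0xE000))               # 0xe000, the private-use codepoint used for [CLS]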
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( __a ): _A :Optional[int] = ['''image_processor''', '''tokenizer'''] _A :Tuple = '''BlipImageProcessor''' _A :List[Any] = '''AutoTokenizer''' def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Dict ): lowercase = False super().__init__(snake_case__ , snake_case__ ) lowercase = self.image_processor def __call__( self : List[str] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowercase = self.tokenizer lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) return text_encoding # add pixel_values lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : int , **snake_case__ : int ): return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
72
1
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept
of unigram matching between the machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR
can be easily extended to include more advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for this matching using a combination
of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture
how well-ordered the matched words in the machine translation are in relation to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the
Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and
their harmonic F1 combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
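A short sketch of the resolution flow the class above implements, using a well-known checkpoint; the lookup goes checkpoint config -> `feature_extractor_type` -> FEATURE_EXTRACTOR_MAPPING_NAMES:

from transformers import AutoFeatureExtractor

# preprocessor_config.json on this repo names Wav2Vec2FeatureExtractor, which
# feature_extractor_class_from_name() resolves inside transformers.models.
extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # -> Wav2Vec2FeatureExtractor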
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
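A sketch of what the `_LazyModule` indirection above buys (an assumed usage pattern, not part of the file): importing the package is cheap, and framework-specific submodules load only on first attribute access.

import transformers.models.resnet as resnet

# The attribute access below is what triggers the real import of
# configuration_resnet through _LazyModule.__getattr__.
config = resnet.ResNetConfig()
print(config.model_type)  # -> "resnet"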
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
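A minimal sketch of consuming the dataclass above through HfArgumentParser (assuming the module is importable as shown; argument values are arbitrary):

from transformers import HfArgumentParser

parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    ["--output_dir", "out", "--label_smoothing", "0.1", "--lr_scheduler", "cosine"]
)
print(training_args.label_smoothing, training_args.lr_scheduler)  # 0.1 cosine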
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
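A sketch of wiring the check above into a CI step; the file layout is assumed from the constants in the script:

import subprocess
import sys

# A non-existent or unsorted entry in utils/documentation_tests.txt raises
# ValueError inside the script, which surfaces here as CalledProcessError.
subprocess.run([sys.executable, "utils/check_doctest_list.py"], check=True)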
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
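How the test script above is typically driven (an assumption based on its `main()`/`_mp_fn()` entry points, not something stated in the file):

# Launched once per process by the accelerate CLI, e.g.:
#   accelerate launch test_sync.py
# A plain `python test_sync.py` run exercises only the DistributedType.NO
# branches: the dataloader-break check and the no-op `no_sync` test.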
from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""


def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import argparse import os import re import packaging.version __SCREAMING_SNAKE_CASE : Optional[int] ='''examples/''' __SCREAMING_SNAKE_CASE : Any ={ '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __SCREAMING_SNAKE_CASE : Union[str, Any] ={ '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __SCREAMING_SNAKE_CASE : Any ='''README.md''' def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: lowercase = f.read() lowercase , lowercase = REPLACE_PATTERNS[pattern] lowercase = replace.replace("""VERSION""" ,lowerCAmelCase__ ) lowercase = re_pattern.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): for folder, directories, fnames in os.walk(lowerCAmelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ,pattern="""examples""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) if not patch: update_version_in_examples(lowerCAmelCase__ ) def UpperCamelCase__ ( ): lowercase = """🤗 Transformers currently provides the following architectures""" lowercase = """1. Want to contribute a new model?""" with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: lowercase = f.readlines() # Find the start of the list. lowercase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowercase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowercase = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,) index += 1 with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(lowerCAmelCase__ ) def UpperCamelCase__ ( ): with open(REPLACE_FILES["""init"""] ,"""r""" ) as f: lowercase = f.read() lowercase = REPLACE_PATTERNS["""init"""][0].search(lowerCAmelCase__ ).groups()[0] return packaging.version.parse(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__=False ): lowercase = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowercase = default_version.base_version elif patch: lowercase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowercase = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowercase = input(f"""Which version are you releasing? [{default_version}]""" ) if len(lowerCAmelCase__ ) == 0: lowercase = default_version print(f"""Updating version to {version}.""" ) global_version_update(lowerCAmelCase__ ,patch=lowerCAmelCase__ ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def UpperCamelCase__ ( ): lowercase = get_version() lowercase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowercase = current_version.base_version # Check with the user we got that right. lowercase = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(lowerCAmelCase__ ) == 0: lowercase = dev_version print(f"""Updating version to {version}.""" ) global_version_update(lowerCAmelCase__ ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __SCREAMING_SNAKE_CASE : Optional[int] =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
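The script's substitution logic can be exercised in isolation. A small sketch applying the same `init` pattern to a synthetic string (the version numbers here are made up):

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
source = '__version__ = "4.31.0.dev0"\n'
print(re_pattern.sub('__version__ = "4.31.0"', source))  # __version__ = "4.31.0"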
72
1
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" ,[ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" ,"""w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" ,[ DatasetInfo(), DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_info.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfo.from_directory(lowerCAmelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""dataset_info.json""" ) ) def UpperCamelCase__ ( ): lowercase = DatasetInfo( description="""foo""" ,citation="""bar""" ,homepage="""https://foo.bar""" ,license="""CC0""" ,features=Features({"""a""": Value("""int32""" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train""", """num_examples""": 42}] ,download_checksums={} ,download_size=1_337 ,post_processing_size=442 ,dataset_size=1_234 ,size_in_bytes=1_337 + 442 + 1_234 ,) lowercase = dataset_info._to_yaml_dict() assert sorted(lowerCAmelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) ) lowercase = yaml.safe_dump(lowerCAmelCase__ ) lowercase = yaml.safe_load(lowerCAmelCase__ ) assert dataset_info_yaml_dict == reloaded def UpperCamelCase__ ( ): lowercase = DatasetInfo() lowercase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" ,[ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1_337 ), } ), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_infos_dict.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) # the config_name of the 
dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowercase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""README.md""" ) )
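The YAML round-trip these tests rely on can be sketched directly; `_to_yaml_dict` and `_from_yaml_dict` are the same private helpers the tests call, and the field values are illustrative:

from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
# description/citation are not part of the YAML payload, so only YAML-covered fields survive
reloaded = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
assert reloaded.dataset_size == 42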
72
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple ={ '''google/pix2struct-textcaps-base''': ( '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json''' ), } class A_ ( __a ): _A :List[str] = '''pix2struct_text_model''' _A :int = ['''past_key_values'''] _A :Optional[Any] = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : int , snake_case__ : str=5_02_44 , snake_case__ : Dict=7_68 , snake_case__ : Optional[Any]=64 , snake_case__ : Union[str, Any]=20_48 , snake_case__ : Union[str, Any]=12 , snake_case__ : str=12 , snake_case__ : int=32 , snake_case__ : List[Any]=1_28 , snake_case__ : Optional[int]=0.1 , snake_case__ : int=1E-6 , snake_case__ : int=1.0 , snake_case__ : Dict="gelu_new" , snake_case__ : Union[str, Any]=0 , snake_case__ : str=False , snake_case__ : List[str]=0 , snake_case__ : str=1 , snake_case__ : Optional[Any]=False , snake_case__ : Tuple=True , **snake_case__ : List[str] , ): lowercase = vocab_size lowercase = hidden_size lowercase = d_kv lowercase = d_ff lowercase = num_layers lowercase = num_heads lowercase = relative_attention_num_buckets lowercase = relative_attention_max_distance lowercase = dropout_rate lowercase = layer_norm_epsilon lowercase = initializer_factor lowercase = use_cache lowercase = eos_token_id lowercase = decoder_start_token_id # for backwards compatibility lowercase = dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ): cls._set_token_in_kwargs(snake_case__ ) lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": lowercase = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class A_ ( __a ): _A :Optional[int] = '''pix2struct_vision_model''' def __init__( self : Tuple , snake_case__ : Union[str, Any]=7_68 , snake_case__ : Any=7_68 , snake_case__ : Dict=20_48 , snake_case__ : int=64 , snake_case__ : str=12 , snake_case__ : Optional[int]=12 , snake_case__ : Union[str, Any]="gelu_new" , snake_case__ : Union[str, Any]=1E-6 , snake_case__ : int=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Optional[int]=1E-10 , snake_case__ : Optional[int]=1.0 , snake_case__ : Optional[Any]=40_96 , snake_case__ : Optional[int]=32 , snake_case__ : List[Any]=1_28 , **snake_case__ : Union[str, Any] , ): super().__init__(**snake_case__ ) lowercase = hidden_size lowercase = patch_embed_hidden_size lowercase = d_ff lowercase = dropout_rate lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = initializer_range lowercase = initializer_factor lowercase = attention_dropout lowercase = layer_norm_eps lowercase = dense_act_fn lowercase = seq_len lowercase = relative_attention_num_buckets lowercase = relative_attention_max_distance lowercase = d_kv @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ): cls._set_token_in_kwargs(snake_case__ ) lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": lowercase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class A_ ( __a ): _A :int = '''pix2struct''' _A :str = True def __init__( self : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=1.0 , snake_case__ : Any=0.02 , snake_case__ : Tuple=False , snake_case__ : Union[str, Any]=False , snake_case__ : Tuple=True , **snake_case__ : int , ): super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: lowercase = {} logger.info("""vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.""" ) lowercase = PixaStructTextConfig(**snake_case__ ) lowercase = PixaStructVisionConfig(**snake_case__ ) lowercase = self.text_config.decoder_start_token_id lowercase = self.text_config.pad_token_id lowercase = self.text_config.eos_token_id lowercase = initializer_factor lowercase = initializer_range lowercase = self.initializer_range lowercase = self.initializer_range lowercase = is_vqa @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Any ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = copy.deepcopy(self.__dict__ ) lowercase = self.text_config.to_dict() lowercase = self.vision_config.to_dict() lowercase = self.__class__.model_type return output
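Assembling the composite config from its two sub-configs might look like this sketch (defaults only; the asserted key layout assumes the `to_dict` behavior defined above):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

config = Pix2StructConfig.from_text_vision_configs(
    Pix2StructTextConfig(), Pix2StructVisionConfig()
)
assert config.to_dict()["text_config"]["model_type"] == "pix2struct_text_model"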
72
1
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Monthly amortizing payment for a loan repaid over `years_to_repay` years."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
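Using assumed loan figures for illustration:

# A 300k loan at 6% over 30 years works out to roughly 1798.65 per month.
monthly_payment = equated_monthly_installments(300_000, 0.06, 30)
print(f"{monthly_payment:.2f}")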
72
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad ,grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad ,grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ): model.train() lowercase = model(lowerCAmelCase__ ) lowercase = F.mse_loss(lowerCAmelCase__ ,target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ): set_seed(42 ) lowercase = RegressionModel() lowercase = deepcopy(lowerCAmelCase__ ) lowercase = RegressionDataset(length=80 ) lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 ) model.to(accelerator.device ) if sched: lowercase = AdamW(params=model.parameters() ,lr=1E-3 ) lowercase = AdamW(params=ddp_model.parameters() ,lr=1E-3 ) lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 ) lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 ) # Make a copy of `model` if sched: lowercase , lowercase , lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) else: lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def UpperCamelCase__ ( lowerCAmelCase__ ): # Test when on a single CPU or GPU that the context manager does nothing lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ ) # Use a single batch lowercase , lowercase = next(iter(lowerCAmelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) ) lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCAmelCase__ ): step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) else: # Sync grads step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowerCAmelCase__ 
,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad ,ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )] def UpperCamelCase__ ( lowerCAmelCase__ ): # Test on distributed setup that context manager behaves properly lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ ) # Use a single batch lowercase , lowercase = next(iter(lowerCAmelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) ) lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCAmelCase__ ): step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) else: # Sync grads step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )] def UpperCamelCase__ ( lowerCAmelCase__=False ,lowerCAmelCase__=False ): lowercase = Accelerator( split_batches=lowerCAmelCase__ ,dispatch_batches=lowerCAmelCase__ ,gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ ) for iteration, batch in enumerate(lowerCAmelCase__ ): lowercase , lowercase = batch.values() # Gather the distributed inputs and targs for the base model lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) ) lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowerCAmelCase__ ): step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration 
{iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad ,ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )] GradientState._reset_state() def UpperCamelCase__ ( lowerCAmelCase__=False ,lowerCAmelCase__=False ): lowercase = Accelerator( split_batches=lowerCAmelCase__ ,dispatch_batches=lowerCAmelCase__ ,gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ ,lowerCAmelCase__ ) for iteration, batch in enumerate(lowerCAmelCase__ ): lowercase , lowercase = batch.values() # Gather the distributed inputs and targs for the base model lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) ) lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowerCAmelCase__ ): step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase__ )) if accelerator.num_processes > 1: check_model_parameters(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def UpperCamelCase__ ( ): lowercase = Accelerator() lowercase = RegressionDataset(length=80 ) lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 ) lowercase = RegressionDataset(length=96 ) lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 ) lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowerCAmelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ ) if iteration < len(lowerCAmelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowerCAmelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ ) if batch_num < len(lowerCAmelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def UpperCamelCase__ ( ): lowercase = Accelerator() lowercase = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient 
accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(lowerCAmelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(lowerCAmelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ ,f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,) test_gradient_accumulation(lowerCAmelCase__ ,lowerCAmelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" ,"""2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,"""`split_batches=False`, `dispatch_batches=False`**""" ,) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,) test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase__ ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
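Outside the test harness, the accumulation wrapper these tests exercise is used like this minimal sketch (toy model, toy data, assumed hyperparameters):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 1)), batch_size=8)
model, optimizer, data = accelerator.prepare(model, optimizer, data)

for inputs, targets in data:
    with accelerator.accumulate(model):  # gradients sync only every 2nd step
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()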
72
1
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm; gcd(0, b) is |b|."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
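A few quick checks of the two implementations:

assert greatest_common_divisor(24, 40) == 8
assert greatest_common_divisor(0, 7) == 7
assert gcd_by_iterative(-3, 9) == 3  # result is normalized to be non-negative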
72
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''') class A_ ( unittest.TestCase ): _A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = 0 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig() lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder model_config.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : int ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in tokenizer with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # 
save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in feature extractor with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(snake_case__ ) # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write("""{}""" ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) lowercase = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) lowercase = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) lowercase = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoProcessor.register(snake_case__ , snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with 
tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): class A_ ( __a ): _A :List[str] = False class A_ ( __a ): _A :Dict = False class A_ ( __a ): _A :Union[str, Any] = '''AutoFeatureExtractor''' _A :Tuple = '''AutoTokenizer''' _A :Optional[Any] = False try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local classes. lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class A_ ( unittest.TestCase ): _A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ): lowercase = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ): try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token ) lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , ) lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , 
encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token ) lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(snake_case__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f: lowercase = json.load(snake_case__ ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) ) repo.push_to_hub() lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
72
1
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class A_ ( __a , __a ): @register_to_config def __init__( self : Union[str, Any] , snake_case__ : int = 1_28 , snake_case__ : int = 2_56 , snake_case__ : float = 2_000.0 , snake_case__ : int = 7_68 , snake_case__ : int = 12 , snake_case__ : int = 12 , snake_case__ : int = 64 , snake_case__ : int = 20_48 , snake_case__ : float = 0.1 , ): super().__init__() lowercase = nn.Sequential( nn.Linear(snake_case__ , d_model * 4 , bias=snake_case__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case__ ) , nn.SiLU() , ) lowercase = nn.Embedding(snake_case__ , snake_case__ ) lowercase = False lowercase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowercase = nn.Dropout(p=snake_case__ ) lowercase = nn.ModuleList() for lyr_num in range(snake_case__ ): # FiLM conditional T5 decoder lowercase = DecoderLayer(d_model=snake_case__ , d_kv=snake_case__ , num_heads=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ ) self.decoders.append(snake_case__ ) lowercase = TaLayerNorm(snake_case__ ) lowercase = nn.Dropout(p=snake_case__ ) lowercase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[str] , snake_case__ : List[str] ): lowercase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str ): lowercase , lowercase , lowercase = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. lowercase = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) lowercase = self.conditioning_emb(snake_case__ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) lowercase = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. lowercase = torch.broadcast_to( torch.arange(snake_case__ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) lowercase = self.position_encoding(snake_case__ ) lowercase = self.continuous_inputs_projection(snake_case__ ) inputs += position_encodings lowercase = self.dropout(snake_case__ ) # decoder: No padding present. lowercase = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
lowercase = [(x, self.encoder_decoder_mask(snake_case__ , snake_case__ )) for x, y in encodings_and_masks] # cross attend style: concat encodings lowercase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) lowercase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: lowercase = lyr( snake_case__ , conditioning_emb=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )[0] lowercase = self.decoder_norm(snake_case__ ) lowercase = self.post_dropout(snake_case__ ) lowercase = self.spec_out(snake_case__ ) return spec_out class A_ ( nn.Module ): def __init__( self : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : str=1E-6 ): super().__init__() lowercase = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=snake_case__ , d_kv=snake_case__ , num_heads=snake_case__ , dropout_rate=snake_case__ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=snake_case__ , d_kv=snake_case__ , num_heads=snake_case__ , dropout_rate=snake_case__ , layer_norm_epsilon=snake_case__ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ , layer_norm_epsilon=snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : int , snake_case__ : Tuple=None , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : List[Any]=None , ): lowercase = self.layer[0]( snake_case__ , conditioning_emb=snake_case__ , attention_mask=snake_case__ , ) if encoder_hidden_states is not None: lowercase = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) lowercase = self.layer[1]( snake_case__ , key_value_states=snake_case__ , attention_mask=snake_case__ , ) # Apply Film Conditional Feed Forward layer lowercase = self.layer[-1](snake_case__ , snake_case__ ) return (hidden_states,) class A_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Any , snake_case__ : int ): super().__init__() lowercase = TaLayerNorm(snake_case__ ) lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case__ ) lowercase = Attention(query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , out_bias=snake_case__ , scale_qk=snake_case__ ) lowercase = nn.Dropout(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : int=None , snake_case__ : str=None , ): # pre_self_attention_layer_norm lowercase = self.layer_norm(snake_case__ ) if conditioning_emb is not None: lowercase = self.FiLMLayer(snake_case__ , snake_case__ ) # Self-attention block lowercase = self.attention(snake_case__ ) lowercase = hidden_states + self.dropout(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): super().__init__() lowercase = Attention(query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , out_bias=snake_case__ , scale_qk=snake_case__ ) lowercase = TaLayerNorm(snake_case__ , eps=snake_case__ ) lowercase = nn.Dropout(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : Any , snake_case__ : 
List[str]=None , snake_case__ : Dict=None , ): lowercase = self.layer_norm(snake_case__ ) lowercase = self.attention( snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=attention_mask.squeeze(1 ) , ) lowercase = hidden_states + self.dropout(snake_case__ ) return layer_output class A_ ( nn.Module ): def __init__( self : Dict , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Any ): super().__init__() lowercase = TaDenseGatedActDense(d_model=snake_case__ , d_ff=snake_case__ , dropout_rate=snake_case__ ) lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case__ ) lowercase = TaLayerNorm(snake_case__ , eps=snake_case__ ) lowercase = nn.Dropout(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : Any=None ): lowercase = self.layer_norm(snake_case__ ) if conditioning_emb is not None: lowercase = self.film(snake_case__ , snake_case__ ) lowercase = self.DenseReluDense(snake_case__ ) lowercase = hidden_states + self.dropout(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Any ): super().__init__() lowercase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowercase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowercase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) lowercase = nn.Dropout(snake_case__ ) lowercase = NewGELUActivation() def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Optional[Any] ): lowercase = self.act(self.wi_a(snake_case__ ) ) lowercase = self.wi_a(snake_case__ ) lowercase = hidden_gelu * hidden_linear lowercase = self.dropout(snake_case__ ) lowercase = self.wo(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[int]=1E-6 ): super().__init__() lowercase = nn.Parameter(torch.ones(snake_case__ ) ) lowercase = eps def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 lowercase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=snake_case__ ) lowercase = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: lowercase = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class A_ ( nn.Module ): def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : torch.Tensor ): return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(snake_case__ , 3.0 )) )) class A_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] ): super().__init__() lowercase = nn.Linear(snake_case__ , out_features * 2 , bias=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : List[str] , snake_case__ : Tuple ): lowercase = self.scale_bias(snake_case__ ) lowercase , lowercase = torch.chunk(snake_case__ , 2 , -1 ) lowercase = x * (1 + scale) + shift return x
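The FiLM layer at the bottom reduces to a one-line modulation. A standalone sketch with illustrative shapes:

import torch

def film_modulate(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
    # Feature-wise linear modulation, the final step of the FiLM layer above.
    return x * (1 + scale) + shift

x = torch.randn(2, 8, 16)
# Zero scale and shift leave the activations unchanged.
assert torch.allclose(film_modulate(x, torch.zeros(16), torch.zeros(16)), x)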
72
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" ,[ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" ,"""w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" ,[ DatasetInfo(), DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_info.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfo.from_directory(lowerCAmelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""dataset_info.json""" ) ) def UpperCamelCase__ ( ): lowercase = DatasetInfo( description="""foo""" ,citation="""bar""" ,homepage="""https://foo.bar""" ,license="""CC0""" ,features=Features({"""a""": Value("""int32""" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train""", """num_examples""": 42}] ,download_checksums={} ,download_size=1_337 ,post_processing_size=442 ,dataset_size=1_234 ,size_in_bytes=1_337 + 442 + 1_234 ,) lowercase = dataset_info._to_yaml_dict() assert sorted(lowerCAmelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) ) lowercase = yaml.safe_dump(lowerCAmelCase__ ) lowercase = yaml.safe_load(lowerCAmelCase__ ) assert dataset_info_yaml_dict == reloaded def UpperCamelCase__ ( ): lowercase = DatasetInfo() lowercase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" ,[ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1_337 ), } ), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_infos_dict.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) # the config_name of the 
dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowercase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""README.md""" ) )
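A directory round-trip of the infos dict, mirroring what the test asserts (the size value is illustrative):

import tempfile

from datasets.info import DatasetInfo, DatasetInfosDict

infos = DatasetInfosDict({"default": DatasetInfo(dataset_size=42)})
with tempfile.TemporaryDirectory() as tmp_dir:
    infos.write_to_directory(tmp_dir)  # materializes the YAML metadata in README.md
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
assert reloaded["default"].dataset_size == 42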
72
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
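A hypothetical direct use of the tool (the default checkpoint is downloaded on first call):

tool = TextSummarizationTool()
print(tool("A very long meeting transcript that needs to be condensed ..."))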
72
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = args.pruning_method lowercase = args.threshold lowercase = args.model_name_or_path.rstrip("""/""" ) lowercase = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) lowercase = torch.load(os.path.join(lowerCAmelCase__ ,"""pytorch_model.bin""" ) ) lowercase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowercase = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowercase = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: lowercase = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowercase = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ ,threshold=lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase = TopKBinarizer.apply(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase = ThresholdBinarizer.apply(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase , lowercase = -0.1, 1.1 lowercase = torch.sigmoid(lowerCAmelCase__ ) lowercase = s * (r - l) + l lowercase = s_bar.clamp(min=0.0 ,max=1.0 ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError("""Unknown pruning method""" ) if target_model_path is None: lowercase = os.path.join( os.path.dirname(lowerCAmelCase__ ) ,f"""bertarized_{os.path.basename(lowerCAmelCase__ )}""" ) if not os.path.isdir(lowerCAmelCase__ ): shutil.copytree(lowerCAmelCase__ ,lowerCAmelCase__ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowerCAmelCase__ ,os.path.join(lowerCAmelCase__ ,"""pytorch_model.bin""" ) ) print("""\nPruned model saved! See you later!""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) __SCREAMING_SNAKE_CASE : str =parser.parse_args() main(args)
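The `magnitude` branch keeps a fraction of weights by absolute value. A simplified standalone sketch of that idea (this is not the emmental `MagnitudeBinarizer` API, just the top-fraction selection it performs in spirit; assumes 0 < fraction <= 1):

import torch

def keep_top_fraction(tensor: torch.Tensor, fraction: float) -> torch.Tensor:
    # Keep the `fraction` of weights with the largest magnitude; zero the rest.
    k = max(1, int(fraction * tensor.numel()))
    threshold = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    mask = (tensor.abs() >= threshold).to(tensor.dtype)
    return tensor * mask

pruned = keep_top_fraction(torch.randn(4, 4), fraction=0.25)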
72
1
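To make the "l0" branch above concrete, here is the hard-concrete stretching applied to two illustrative mask scores (the score values are made up; only torch is required):

import torch

scores = torch.tensor([2.0, -4.0])  # hypothetical mask scores
l, r = -0.1, 1.1
mask = (torch.sigmoid(scores) * (r - l) + l).clamp(min=0.0, max=1.0)
print(mask)  # approximately tensor([0.9570, 0.0000]): the first weight is kept (scaled), the second pruned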
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    # Probability density of the normal distribution N(mu, sigma^2).
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
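A quick sanity check of the density above: at x = mu the exponent vanishes, so the value is the peak 1 / sqrt(2 * pi * sigma**2):

print(gaussian(0))          # 0.3989422804014327, the standard normal peak
print(gaussian(2, mu=2.0))  # same value: shifting the mean moves the peak, not its height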
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions since the graph is undirected.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking if the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node  # Euler circuit
    if odd_degree_nodes == 2:
        return 2, odd_node  # Euler path; must start at an odd-degree node
    return 3, odd_node  # neither


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
72
1
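As a sanity check of the degree-parity rule in check_circuit_or_path (a connected graph has an Euler path iff exactly two vertices have odd degree, and an Euler circuit iff none do), here is one extra graph not in main; the traversal must start at an odd-degree endpoint:

path_graph = {1: [2], 2: [1, 3], 3: [2]}  # endpoints 1 and 3 have odd degree
check_euler(path_graph, 10)  # prints "graph has a Euler path" and then [3, 2, 1]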
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
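Worked example for the two helpers above: equal resistors in parallel divide the resistance, in series they add:

print(resistor_parallel([10, 10]))  # 5.0  -> 1 / (1/10 + 1/10)
print(resistor_series([10, 10]))    # 20.0 -> 10 + 10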
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
72
1
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
72
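To illustrate encode_example above: multiple translations for one language are split into parallel, language-sorted lists (the languages and strings here are made up):

feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}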
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        # Initial hash state (five 32-bit words) defined by the SHA-1 spec.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate a 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Append a 1 bit, zero padding, and the 64-bit message length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
72
1
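A spot check of the pure-Python implementation above against hashlib, using the classic 'abc' test vector:

assert SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()
# both yield 'a9993e364706816aba3e25717850c26c9cd0d89d'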