code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _a : str = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( _snake_case , _snake_case): @register_to_config def __init__( self , snake_case_ , snake_case_ = None , snake_case_ = None ): super().__init__() _snake_case : int = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ ) else: _snake_case : Optional[Any] = None _snake_case : Dict = torch.nn.Parameter(lowerCAmelCase__ ) class _UpperCAmelCase ( _snake_case): __lowercase : Any = 4_2 __lowercase : List[Any] = 4_2 __lowercase : int = 4_2 __lowercase : Optional[Any] = 4_2 __lowercase : Union[str, Any] = 4_2 __lowercase : List[Any] = 4_2 def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ): super().__init__() self.register_modules( vqvae=lowerCAmelCase__ , transformer=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[str] = len(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else 1 # get prompt text embeddings _snake_case : Optional[Any] = self.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _snake_case : Optional[int] = 
text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) _snake_case : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length] _snake_case : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _snake_case : List[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ ) # duplicate text embeddings for each generation per prompt _snake_case : List[str] = prompt_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _snake_case : Dict = self.learned_classifier_free_sampling_embeddings.embeddings _snake_case : Union[str, Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCAmelCase__ , 1 , 1 ) else: _snake_case : Union[str, Any] = [""] * batch_size _snake_case : Dict = text_input_ids.shape[-1] _snake_case : Optional[Any] = self.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" , ) _snake_case : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _snake_case : str = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , 
keepdim=lowerCAmelCase__ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _snake_case : Any = negative_prompt_embeds.shape[1] _snake_case : Optional[int] = negative_prompt_embeds.repeat(1 , lowerCAmelCase__ , 1 ) _snake_case : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _snake_case : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , snake_case_ , snake_case_ = 1_00 , snake_case_ = 5.0 , snake_case_ = 1.0 , snake_case_ = 1 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , ): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _snake_case : List[Any] = 1 elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _snake_case : Any = len(lowerCAmelCase__ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}' ) _snake_case : List[Any] = batch_size * num_images_per_prompt _snake_case : Optional[int] = guidance_scale > 1.0 _snake_case : str = self._encode_prompt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(lowerCAmelCase__ )}.' 
) # get the initial completely masked latents unless the user supplied it _snake_case : Optional[int] = (batch_size, self.transformer.num_latent_pixels) if latents is None: _snake_case : str = self.transformer.num_vector_embeds - 1 _snake_case : Optional[Any] = torch.full(lowerCAmelCase__ , lowerCAmelCase__ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," F' {self.transformer.num_vector_embeds - 1} (inclusive).' ) _snake_case : str = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device ) _snake_case : str = self.scheduler.timesteps.to(self.device ) _snake_case : Any = latents for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ): # expand the sample if we are doing classifier free guidance _snake_case : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _snake_case : Optional[int] = self.transformer(lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ ).sample if do_classifier_free_guidance: _snake_case , _snake_case : Optional[int] = model_output.chunk(2 ) _snake_case : Optional[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(lowerCAmelCase__ , dim=1 , keepdim=lowerCAmelCase__ ) _snake_case : Any = self.truncate(lowerCAmelCase__ , lowerCAmelCase__ ) # remove `log(0)`'s (`-inf`s) _snake_case : Union[str, Any] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _snake_case : str = self.scheduler.step(lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , 
generator=lowerCAmelCase__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _snake_case : Dict = self.vqvae.config.vq_embed_dim _snake_case : Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _snake_case : Tuple = self.vqvae.quantize.get_codebook_entry(lowerCAmelCase__ , shape=lowerCAmelCase__ ) _snake_case : int = self.vqvae.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ ).sample _snake_case : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) _snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _snake_case : Tuple = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase__ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): _snake_case , _snake_case : Dict = torch.sort(lowerCAmelCase__ , 1 , descending=lowerCAmelCase__ ) _snake_case : List[Any] = torch.exp(lowerCAmelCase__ ) _snake_case : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _snake_case : List[str] = torch.full_like(keep_mask[:, 0:1, :] , lowerCAmelCase__ ) _snake_case : List[Any] = torch.cat((all_true, keep_mask) , dim=1 ) _snake_case : Union[str, Any] = keep_mask[:, :-1, :] _snake_case : Tuple = keep_mask.gather(1 , indices.argsort(1 ) ) _snake_case : List[str] = log_p_x_0.clone() _snake_case : Optional[Any] = -torch.inf # -inf = log(0) return rv
701
"""simple docstring""" from __future__ import annotations from collections import namedtuple def a__ ( a : float , a : float , a : float ): """simple docstring""" _snake_case : Optional[Any] = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import numpy as np def a__ ( a : Tuple ): """simple docstring""" return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil _a : List[Any] = 100 _a : List[str] = set(range(3, NUM_PRIMES, 2)) primes.add(2) _a : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def a__ ( a : Optional[int] ): """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} _snake_case : Union[str, Any] = set() _snake_case : Tuple = 42 _snake_case : str = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def a__ ( a : Union[str, Any] = 5_000 ): """simple docstring""" for number_to_partition in range(1 , a ): if len(partition(a ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f'{solution() = }')
703
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _snake_case): __lowercase : int = """EncodecFeatureExtractor""" __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , snake_case_ , snake_case_ ): super().__init__(snake_case_ , snake_case_ ) _snake_case : Dict = self.feature_extractor _snake_case : Any = False def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ): return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ ) def __call__( self , *snake_case_ , **snake_case_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*snake_case_ , **snake_case_ ) _snake_case : str = kwargs.pop("audio" , snake_case_ ) _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ ) _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Any = args[0] _snake_case : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if text is not None: _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ ) if audio is not None: _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ ) if audio is None: return inputs elif text is None: return audio_inputs else: _snake_case : str = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _snake_case : List[str] = audio_inputs["padding_mask"] return inputs def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): _snake_case : Tuple = kwargs.pop("audio" , snake_case_ ) _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Tuple = args[0] _snake_case : Dict = args[1:] if audio_values is not None: return self._decode_audio(snake_case_ , padding_mask=snake_case_ ) else: return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[int] = to_numpy(snake_case_ ) _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape if padding_mask is None: return list(snake_case_ ) _snake_case : Optional[int] = to_numpy(snake_case_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _snake_case : Any = seq_len - padding_mask.shape[-1] _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ ) _snake_case : Any = audio_values.tolist() for i in range(snake_case_ ): _snake_case : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _snake_case : Tuple = sliced_audio.reshape(snake_case_ 
, -1 ) return audio_values
87
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : Any = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys _a : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["""YolosFeatureExtractor"""] _a : List[Any] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Union[str, Any] = logging.get_logger(__name__) _a : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _UpperCAmelCase ( lowercase__): __lowercase : Tuple = """ibert""" def __init__( self , snake_case_=3_05_22 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=False , snake_case_="none" , **snake_case_ , ): super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) _snake_case : Any = vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : int = num_attention_heads _snake_case : List[str] = hidden_act _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_dropout_prob _snake_case : Any = attention_probs_dropout_prob _snake_case : Tuple = max_position_embeddings _snake_case : Any = type_vocab_size _snake_case : Optional[int] = initializer_range _snake_case : Tuple = layer_norm_eps _snake_case : int = position_embedding_type _snake_case : List[str] = quant_mode _snake_case : int = force_dequant class _UpperCAmelCase ( lowercase__): @property def lowerCamelCase__ ( self ): if self.task == "multiple-choice": _snake_case : Any = {0: "batch", 1: "choice", 2: "sequence"} 
else: _snake_case : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
705
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those 
are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
0
"""simple docstring""" import functools def a__ ( a : Union[str, Any] , a : List[Any] ): """simple docstring""" if not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(lowerCamelCase_ ) != 3 or not all(isinstance(lowerCamelCase_ , lowerCamelCase_ ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(lowerCamelCase_ ) == 0: return 0 if min(lowerCamelCase_ ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(lowerCamelCase_ ) >= 366: raise ValueError("All days elements should be less than 366" ) _snake_case : Optional[Any] = set(lowerCamelCase_ ) @functools.cache def dynamic_programming(a : List[str] ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
706
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" from __future__ import annotations def a__ ( a : float , a : float , a : float ): """simple docstring""" if days_between_payments <= 0: raise ValueError("days_between_payments must be > 0" ) if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * daily_interest_rate * days_between_payments def a__ ( a : float , a : float , a : float , ): """simple docstring""" if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def a__ ( a : float , a : float , a : float , ): """simple docstring""" if number_of_years <= 0: raise ValueError("number_of_years must be > 0" ) if nominal_annual_percentage_rate < 0: raise ValueError("nominal_annual_percentage_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return compound_interest( a , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
707
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
0
"""simple docstring""" import math def a__ ( a : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a__ ( a : float = 0.1 ): """simple docstring""" _snake_case : Optional[Any] = 3 _snake_case : Union[str, Any] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(__A ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
708
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
87
0
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class _UpperCAmelCase ( _snake_case): def lowerCamelCase__ ( self ): _snake_case : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , "num_encoder_blocks" ) ) class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_=13 , snake_case_=64 , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[16, 32, 64, 1_28] , snake_case_=[1, 4, 8, 16] , snake_case_=[1, 2, 4, 8] , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=3 , snake_case_=None , ): _snake_case : List[Any] = parent _snake_case : Tuple = batch_size _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = num_channels _snake_case : List[Any] = num_encoder_blocks _snake_case : Tuple = sr_ratios _snake_case : int = depths _snake_case : Tuple = hidden_sizes _snake_case : Optional[Any] = downsampling_rates _snake_case : Tuple = num_attention_heads _snake_case : int = is_training _snake_case : 
List[Any] = use_labels _snake_case : Dict = hidden_act _snake_case : Dict = hidden_dropout_prob _snake_case : str = attention_probs_dropout_prob _snake_case : List[Any] = initializer_range _snake_case : int = num_labels _snake_case : List[Any] = scope def lowerCamelCase__ ( self ): _snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : List[str] = None if self.use_labels: _snake_case : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _snake_case : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = SegformerModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _snake_case : Dict = model(_lowerCAmelCase ) _snake_case : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[Any] = self.num_labels _snake_case : List[str] = SegformerForSemanticSegmentation(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _snake_case : Any = model(_lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _snake_case : Tuple = model(_lowerCAmelCase , labels=_lowerCAmelCase ) 
self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[str] = 1 _snake_case : Union[str, Any] = SegformerForSemanticSegmentation(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _snake_case : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_lowerCAmelCase ) _snake_case : Union[str, Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : Tuple = config_and_inputs _snake_case : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Optional[int] = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __lowercase : List[Any] = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __lowercase : Any = True __lowercase : Any = False __lowercase : Dict = False __lowercase : Union[str, Any] = False def lowerCamelCase__ ( self ): _snake_case : Tuple = SegformerModelTester(self ) _snake_case : Tuple = SegformerConfigTester(self , config_class=_lowerCAmelCase ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_binary_image_segmentation(*_lowerCAmelCase ) def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*_lowerCAmelCase ) @unittest.skip("SegFormer does not use inputs_embeds" ) def lowerCamelCase__ ( self ): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : Optional[int] = model_class(_lowerCAmelCase ) _snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : str = [*signature.parameters.keys()] _snake_case : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def lowerCamelCase__ ( self ): _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Optional[int] = True for model_class in self.all_model_classes: _snake_case : List[Any] = True _snake_case : Union[str, Any] = False _snake_case : Dict = True _snake_case : Tuple = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _snake_case : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _snake_case : Union[str, Any] = outputs.attentions _snake_case : int = sum(self.model_tester.depths ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case : List[Any] = True _snake_case : int = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _snake_case : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase 
) ) _snake_case : List[str] = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # verify the first attentions (first block, first layer) _snake_case : Optional[Any] = (self.model_tester.image_size // 4) ** 2 _snake_case : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _snake_case : Dict = (self.model_tester.image_size // 32) ** 2 _snake_case : Tuple = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _snake_case : List[Any] = len(_lowerCAmelCase ) # Check attention is always last and order is fine _snake_case : List[Any] = True _snake_case : Any = True _snake_case : Dict = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _snake_case : Optional[int] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) ) _snake_case : Tuple = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # verify the first attentions (first block, first layer) _snake_case : str = (self.model_tester.image_size // 4) ** 2 _snake_case : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowerCamelCase__ ( self ): def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): _snake_case : List[str] = 
model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _snake_case : Tuple = outputs.hidden_states _snake_case : int = self.model_tester.num_encoder_blocks self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : List[Any] = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : List[Any] = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCamelCase__ ( self ): if not self.model_tester.is_training: return _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : str = True for model_class in self.all_model_classes: if model_class in get_values(_lowerCAmelCase ): continue _snake_case : Optional[Any] = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.train() _snake_case : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) _snake_case : List[Any] = model(**_lowerCAmelCase ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def lowerCamelCase__ ( self ): pass @slow def lowerCamelCase__ ( self ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = SegformerModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def a__ ( ): """simple docstring""" _snake_case : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): # only resize + normalize _snake_case : Optional[int] = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_lowerCAmelCase , align=_lowerCAmelCase , do_random_crop=_lowerCAmelCase ) _snake_case : Optional[int] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( _lowerCAmelCase ) _snake_case : str = prepare_img() _snake_case : str = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) _snake_case : Union[str, Any] = encoded_inputs.pixel_values.to(_lowerCAmelCase ) with torch.no_grad(): _snake_case : Optional[Any] = model(_lowerCAmelCase ) _snake_case : Tuple = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) _snake_case : Tuple = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self ): # only resize + normalize _snake_case : Tuple = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_lowerCAmelCase , align=_lowerCAmelCase , do_random_crop=_lowerCAmelCase ) _snake_case : str = SegformerForSemanticSegmentation.from_pretrained( 
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_lowerCAmelCase ) _snake_case : Any = prepare_img() _snake_case : Any = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) _snake_case : int = encoded_inputs.pixel_values.to(_lowerCAmelCase ) with torch.no_grad(): _snake_case : Optional[Any] = model(_lowerCAmelCase ) _snake_case : Union[str, Any] = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) _snake_case : str = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-1 ) ) @slow def lowerCamelCase__ ( self ): # only resize + normalize _snake_case : Optional[Any] = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=_lowerCAmelCase , align=_lowerCAmelCase , do_random_crop=_lowerCAmelCase ) _snake_case : List[Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( _lowerCAmelCase ) _snake_case : Optional[int] = prepare_img() _snake_case : List[str] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) _snake_case : Optional[Any] = encoded_inputs.pixel_values.to(_lowerCAmelCase ) with torch.no_grad(): _snake_case : Optional[Any] = model(_lowerCAmelCase ) _snake_case : Any = outputs.logits.detach().cpu() _snake_case : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(5_00, 3_00)] ) _snake_case : Optional[int] = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase ) _snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase ) _snake_case : List[str] 
= torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
709
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( _snake_case): __lowercase : Optional[Any] = """openai-gpt""" __lowercase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Dict = n_positions _snake_case : Any = n_embd _snake_case : Any = n_layer _snake_case : Optional[int] = n_head _snake_case : Union[str, Any] = afn _snake_case : Dict = resid_pdrop _snake_case : str = embd_pdrop _snake_case : Union[str, Any] = attn_pdrop _snake_case : str = layer_norm_epsilon _snake_case : Union[str, Any] = initializer_range _snake_case : Any = summary_type _snake_case : List[str] = summary_use_proj _snake_case : Optional[int] = summary_activation _snake_case : Union[str, Any] = summary_first_dropout _snake_case : Optional[int] = summary_proj_to_labels super().__init__(**snake_case_ )
87
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : List[str] = logging.get_logger(__name__) _a : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _UpperCAmelCase ( _snake_case): __lowercase : Union[str, Any] = "ibert" def __init__( self , snake_case_=3_05_22 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=False , snake_case_="none" , **snake_case_ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) _snake_case : List[Any] = vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : List[Any] = num_hidden_layers _snake_case : Any = num_attention_heads _snake_case : List[str] = hidden_act _snake_case : List[str] = intermediate_size _snake_case : Optional[int] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : List[str] = type_vocab_size _snake_case : Dict = initializer_range _snake_case : Optional[int] = layer_norm_eps _snake_case : Any = position_embedding_type _snake_case : Tuple = quant_mode _snake_case : Union[str, Any] = force_dequant class _UpperCAmelCase ( _snake_case): @property def lowerCamelCase__ ( self ): if self.task == "multiple-choice": _snake_case : Optional[int] = {0: "batch", 
1: "choice", 2: "sequence"} else: _snake_case : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
710
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', 
f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), 
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] 
_snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case , _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a ) 
_snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a ) 
model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
0
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
711
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
87
0
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class _UpperCAmelCase(unittest.TestCase):
    """Integration tests for the Flax Stable Diffusion 2 pipeline.

    Note: the original block defined three methods all named ``lowerCamelCase__``, so
    only the last one survived — the tearDown hook and the first test were silently
    discarded. They are restored here under distinct names. ``jnp.bfloataa`` (not a
    real jax dtype) is fixed to ``jnp.bfloat16``.
    """

    def tearDown(self):
        # Free interpreter-side references between tests; models are large.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_2_flax(self):
        # from_pretrained returns (pipeline, params) for Flax pipelines.
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        # Replicate params / shard inputs across all local devices for pmapped inference.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # Inject the scheduler's own params into the pipeline param tree
        # (source assigned scheduler_params into the params returned above).
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
712
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : Optional[int] = logging.get_logger(__name__) _a : List[str] = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """convnextv2""" def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) _snake_case : Tuple = num_channels _snake_case : Optional[int] = patch_size _snake_case : Tuple = num_stages _snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _snake_case : str = [3, 3, 9, 3] if depths is None else depths _snake_case : int = hidden_act _snake_case : Tuple = initializer_range _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Optional[int] = drop_path_rate _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
87
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _a : Any = logging.get_logger(__name__) def a__ ( a : Optional[Any] ): """simple docstring""" _snake_case : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: _snake_case : Optional[Any] = [144, 192, 240] _snake_case : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: _snake_case : Tuple = [96, 120, 144] _snake_case : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: _snake_case : Dict = [64, 80, 96] _snake_case : Tuple = [16, 16, 24, 48, 64, 80, 320] _snake_case : int = 0.05 _snake_case : Tuple = 2.0 if mobilevit_name.startswith("deeplabv3_" ): _snake_case : str = 512 _snake_case : Optional[int] = 16 _snake_case : Union[str, Any] = 21 _snake_case : str = "pascal-voc-id2label.json" else: _snake_case : Union[str, Any] = 1_000 _snake_case : int = "imagenet-1k-id2label.json" _snake_case : List[Any] = "huggingface/label-files" _snake_case : Optional[Any] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="dataset" ) , "r" ) ) _snake_case : Optional[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} _snake_case : Dict = idalabel _snake_case : List[Any] = {v: k for k, v in idalabel.items()} return config def a__ ( a : int , a : Tuple=False ): """simple docstring""" for i in range(1 , 6 ): if f'layer_{i}.' in name: _snake_case : Dict = name.replace(f'layer_{i}.' , f'encoder.layer.{i - 1}.' ) if "conv_1." in name: _snake_case : Any = name.replace("conv_1." , "conv_stem." ) if ".block." in name: _snake_case : Optional[Any] = name.replace(".block." , "." 
) if "exp_1x1" in name: _snake_case : Optional[int] = name.replace("exp_1x1" , "expand_1x1" ) if "red_1x1" in name: _snake_case : Tuple = name.replace("red_1x1" , "reduce_1x1" ) if ".local_rep.conv_3x3." in name: _snake_case : Optional[Any] = name.replace(".local_rep.conv_3x3." , ".conv_kxk." ) if ".local_rep.conv_1x1." in name: _snake_case : Tuple = name.replace(".local_rep.conv_1x1." , ".conv_1x1." ) if ".norm." in name: _snake_case : Any = name.replace(".norm." , ".normalization." ) if ".conv." in name: _snake_case : List[Any] = name.replace(".conv." , ".convolution." ) if ".conv_proj." in name: _snake_case : List[str] = name.replace(".conv_proj." , ".conv_projection." ) for i in range(0 , 2 ): for j in range(0 , 4 ): if f'.{i}.{j}.' in name: _snake_case : Any = name.replace(f'.{i}.{j}.' , f'.{i}.layer.{j}.' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if f'.{i}.{j}.' in name: _snake_case : Optional[int] = name.replace(f'.{i}.{j}.' , f'.{i}.' ) if "expand_1x1" in name: _snake_case : Dict = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: _snake_case : Tuple = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: _snake_case : List[str] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" ) for i in range(2 , 5 ): if f'.global_rep.{i}.weight' in name: _snake_case : List[Any] = name.replace(f'.global_rep.{i}.weight' , ".layernorm.weight" ) if f'.global_rep.{i}.bias' in name: _snake_case : Optional[Any] = name.replace(f'.global_rep.{i}.bias' , ".layernorm.bias" ) if ".global_rep." in name: _snake_case : List[str] = name.replace(".global_rep." , ".transformer." ) if ".pre_norm_mha.0." in name: _snake_case : Any = name.replace(".pre_norm_mha.0." , ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: _snake_case : int = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." ) if ".pre_norm_ffn.0." 
in name: _snake_case : Union[str, Any] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." ) if ".pre_norm_ffn.1." in name: _snake_case : Any = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: _snake_case : Dict = name.replace(".pre_norm_ffn.4." , ".output.dense." ) if ".transformer." in name: _snake_case : Any = name.replace(".transformer." , ".transformer.layer." ) if ".aspp_layer." in name: _snake_case : List[str] = name.replace(".aspp_layer." , "." ) if ".aspp_pool." in name: _snake_case : Union[str, Any] = name.replace(".aspp_pool." , "." ) if "seg_head." in name: _snake_case : List[str] = name.replace("seg_head." , "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: _snake_case : Tuple = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." ) if "classifier.fc." in name: _snake_case : Any = name.replace("classifier.fc." , "classifier." ) elif (not base_model) and ("segmentation_head." not in name): _snake_case : Tuple = "mobilevit." + name return name def a__ ( a : Optional[Any] , a : Dict , a : Optional[Any]=False ): """simple docstring""" if base_model: _snake_case : Tuple = "" else: _snake_case : str = "mobilevit." for key in orig_state_dict.copy().keys(): _snake_case : int = orig_state_dict.pop(__UpperCAmelCase ) if key[:8] == "encoder.": _snake_case : Dict = key[8:] if "qkv" in key: _snake_case : List[str] = key.split("." ) _snake_case : Union[str, Any] = int(key_split[0][6:] ) - 1 _snake_case : int = int(key_split[3] ) _snake_case : Union[str, Any] = model.get_submodule(f'{model_prefix}encoder.layer.{layer_num}' ) _snake_case : Any = layer.transformer.layer[transformer_num].attention.attention.all_head_size _snake_case : List[Any] = ( f'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: _snake_case : Tuple = val[:dim, :] _snake_case : Optional[int] = val[dim : dim * 2, :] _snake_case : Union[str, Any] = val[-dim:, :] else: _snake_case : int = val[:dim] _snake_case : Any = val[dim : dim * 2] _snake_case : Optional[Any] = val[-dim:] else: _snake_case : List[str] = val return orig_state_dict def a__ ( ): """simple docstring""" _snake_case : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" _snake_case : Dict = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def a__ ( a : List[str] , a : Tuple , a : Union[str, Any] , a : List[str]=False ): """simple docstring""" _snake_case : Dict = get_mobilevit_config(__UpperCAmelCase ) # load original state_dict _snake_case : str = torch.load(__UpperCAmelCase , map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): _snake_case : Dict = MobileViTForSemanticSegmentation(__UpperCAmelCase ).eval() else: _snake_case : Union[str, Any] = MobileViTForImageClassification(__UpperCAmelCase ).eval() _snake_case : List[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase ) model.load_state_dict(__UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor _snake_case : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _snake_case : int = image_processor(images=prepare_img() , return_tensors="pt" ) _snake_case : Optional[int] = model(**__UpperCAmelCase ) _snake_case : Any = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": _snake_case : Dict = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == 
"deeplabv3_mobilevit_xs": _snake_case : Optional[Any] = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": _snake_case : Union[str, Any] = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8624, -9.5964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' ) assert torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) else: assert logits.shape == (1, 1_000) if mobilevit_name == "mobilevit_s": _snake_case : List[Any] = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": _snake_case : str = torch.tensor([-2.4761, -0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": _snake_case : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}' ) assert torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__UpperCAmelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: _snake_case : Tuple = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." 
) _snake_case : str = model_mapping[mobilevit_name] image_processor.push_to_hub(__UpperCAmelCase , organization="apple" ) model.push_to_hub(__UpperCAmelCase , organization="apple" ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : int = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
713
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def a__ ( a : Namespace ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _a : int = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _UpperCAmelCase ( _snake_case): @staticmethod def lowerCamelCase__ ( snake_case_ ): _snake_case : Dict = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=snake_case_ ) def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ): _snake_case : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F'Loading model {model_type}' ) _snake_case : Optional[int] = model_type _snake_case : Any = tf_checkpoint _snake_case : Optional[int] = pytorch_dump_output _snake_case : Tuple = config _snake_case : Tuple = finetuning_task_name def lowerCamelCase__ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: 
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) if "ckpt" in self._tf_checkpoint.lower(): _snake_case : int = self._tf_checkpoint _snake_case : Optional[Any] = "" else: _snake_case : Optional[int] = self._tf_checkpoint _snake_case : List[str] = "" convert_transfo_xl_checkpoint_to_pytorch( snake_case_ , self._config , self._pytorch_dump_output , snake_case_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
87
0
"""Convert a ParlAI Blenderbot checkpoint to the Hugging Face format.

The heavy ``transformers`` import happens lazily inside
``convert_parlai_checkpoint`` so that the pure key-renaming helpers in this
module can be imported (and unit-tested) without the library installed.
"""
import argparse

import torch


# (parlai_name, hf_name) substring substitutions, applied in this order.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a single ParlAI state-dict key to its Hugging Face equivalent.

    Args:
        k: the original ParlAI key, e.g. ``"encoder.attention.q_lin.weight"``.

    Returns:
        The corresponding HF key, e.g. ``"encoder.self_attn.q_proj.weight"``.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        # Encoder attention is always self-attention; norm2 is the final norm.
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        # Decoder has three norms: self-attn, cross-attn, final.
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """Rename ``layernorm_embedding`` keys to ``layer_norm`` in-place.

    Needed for Blenderbot-3B checkpoints, whose pre-norm architecture stores
    the embedding layer norm under a different name.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd  # a collision would silently drop a weight
        sd[new_k] = v


# Keys present in ParlAI checkpoints that have no HF counterpart.
IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, remap its keys and save an HF model.

    Args:
        checkpoint_path: path to the ParlAI ``.bin`` checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_json_path: path to a Blenderbot config JSON file.
    """
    from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration  # lazy: heavy import

    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:
        # Blenderbot-3B checkpoints: rename layernorm_embedding -> layer_norm.
        # NOTE(review): this mutates ``sd`` *after* ``mapping`` was built, as in
        # the original code — confirm upstream whether ``mapping`` was intended.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
714
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a__ ( a : List[str] , a : Any ): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Any = flax_key_tuple[:-1] + ("weight",) _snake_case : str = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",) _snake_case : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ): """simple docstring""" if "metadata" in layer: _snake_case : Optional[int] = layer.split("metadata" ) _snake_case : Optional[int] = "".join(split_layer[0] )[:-1] _snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: _snake_case : Any = layer.split("kvstore" ) _snake_case : str = "".join(split_layer[0] )[:-1] _snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: _snake_case : List[Any] = layer.split("/" ) _snake_case : Tuple = "/".join(split_layer[:-1] ) _snake_case : int = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: _snake_case : Tuple = "file" else: _snake_case : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, 
content def a__ ( a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = rename_keys(a ) _snake_case : int = {} for k, v in current_block.items(): _snake_case : Optional[int] = v _snake_case : Optional[int] = new_current_block torch.save(a , a ) def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ): """simple docstring""" _snake_case : Any = convert_file_size_to_int(a ) _snake_case : Tuple = [] _snake_case : Optional[int] = {} _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: _snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] _snake_case : Optional[Any] = flatten_dict(a , sep="/" ) _snake_case : Optional[Any] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: _snake_case : Dict = content else: _snake_case : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Dict = torch.tensor(a ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) _snake_case : Optional[Any] = "/".join(a ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _snake_case : Any = os.path.join( a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : List[Any] = {} _snake_case : str = 0 _snake_case : List[str] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : str = {} _snake_case : Any = {} for idx, shard in enumerate(a ): _snake_case : Optional[int] = weights_name.replace( ".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(a , os.path.join(a , a ) ) _snake_case : Dict = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {"total_size": total_size} _snake_case : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: _snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) _a : Optional[int] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a__ ( ): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) _snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) _snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" ) _snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." _snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids _snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
87
0
"""simple docstring""" import requests _a : Optional[Any] = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def a__ ( a : List[str] ): """simple docstring""" _snake_case : Union[str, Any] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"] , 1 ): print(f'{i}.) {article["title"]}' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
715
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ): _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
return inputs_dict class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _snake_case : Optional[Any] = parent _snake_case : List[Any] = batch_size _snake_case : Optional[int] = seq_length _snake_case : Dict = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : int = use_labels _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : List[Any] = num_labels _snake_case : Optional[int] = num_choices _snake_case : Optional[int] = scope _snake_case : Any = embedding_size def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Optional[Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : Dict = None _snake_case : Tuple = None _snake_case : str = None if self.use_labels: _snake_case : Union[str, Any] = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = TFMobileBertModel(config=snake_case_ ) _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) _snake_case : Union[str, Any] = [input_ids, input_mask] _snake_case : Optional[Any] = model(snake_case_ ) _snake_case : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[str] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Tuple = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = self.num_labels _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Any = self.num_choices _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : 
Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _snake_case : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.num_labels _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ ) _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ ) _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , 
config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase__ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" 
) _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : Union[str, Any] = model(snake_case_ )[0] _snake_case : int = [1, 6, 3_05_22] self.assertEqual(output.shape , snake_case_ ) _snake_case : Optional[Any] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
87
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _a : Optional[Any] = logging.get_logger(__name__) def a__ ( a : Tuple ): """simple docstring""" _snake_case : Optional[int] = DPTConfig() if "large" in checkpoint_url: _snake_case : Tuple = 1_024 _snake_case : Optional[Any] = 4_096 _snake_case : Dict = 24 _snake_case : Any = 16 _snake_case : Tuple = [5, 11, 17, 23] _snake_case : str = [256, 512, 1_024, 1_024] _snake_case : int = (1, 384, 384) if "ade" in checkpoint_url: _snake_case : str = True _snake_case : Optional[int] = 150 _snake_case : Optional[Any] = 'huggingface/label-files' _snake_case : Any = 'ade20k-id2label.json' _snake_case : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) ) _snake_case : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _snake_case : int = idalabel _snake_case : Optional[Any] = {v: k for k, v in idalabel.items()} _snake_case : str = [1, 150, 480, 480] return config, expected_shape def a__ ( a : Dict ): """simple docstring""" _snake_case : Optional[int] = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def a__ ( a : List[Any] ): """simple docstring""" if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): _snake_case : Tuple = name.replace("pretrained.model" , "dpt.encoder" ) if "pretrained.model" in name: _snake_case : Tuple = name.replace("pretrained.model" , "dpt.embeddings" ) if "patch_embed" in name: _snake_case : int = name.replace("patch_embed" , "patch_embeddings" ) if "pos_embed" in 
name: _snake_case : List[str] = name.replace("pos_embed" , "position_embeddings" ) if "attn.proj" in name: _snake_case : List[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "proj" in name and "project" not in name: _snake_case : Optional[int] = name.replace("proj" , "projection" ) if "blocks" in name: _snake_case : Optional[Any] = name.replace("blocks" , "layer" ) if "mlp.fc1" in name: _snake_case : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: _snake_case : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" ) if "norm1" in name: _snake_case : List[Any] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: _snake_case : List[Any] = name.replace("norm2" , "layernorm_after" ) if "scratch.output_conv" in name: _snake_case : List[str] = name.replace("scratch.output_conv" , "head" ) if "scratch" in name: _snake_case : Dict = name.replace("scratch" , "neck" ) if "layer1_rn" in name: _snake_case : Union[str, Any] = name.replace("layer1_rn" , "convs.0" ) if "layer2_rn" in name: _snake_case : Any = name.replace("layer2_rn" , "convs.1" ) if "layer3_rn" in name: _snake_case : Any = name.replace("layer3_rn" , "convs.2" ) if "layer4_rn" in name: _snake_case : List[Any] = name.replace("layer4_rn" , "convs.3" ) if "refinenet" in name: _snake_case : Optional[Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 _snake_case : List[Any] = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: _snake_case : str = name.replace("out_conv" , "projection" ) if "resConfUnit1" in name: _snake_case : Tuple = name.replace("resConfUnit1" , "residual_layer1" ) if "resConfUnit2" in name: _snake_case : List[str] = name.replace("resConfUnit2" , "residual_layer2" ) if "conv1" in name: _snake_case : Dict = name.replace("conv1" , "convolution1" ) if "conv2" in name: _snake_case : List[Any] = 
name.replace("conv2" , "convolution2" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: _snake_case : Any = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" ) if "pretrained.act_postprocess2.0.project.0" in name: _snake_case : int = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" ) if "pretrained.act_postprocess3.0.project.0" in name: _snake_case : int = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" ) if "pretrained.act_postprocess4.0.project.0" in name: _snake_case : List[str] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" ) # resize blocks if "pretrained.act_postprocess1.3" in name: _snake_case : str = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" ) if "pretrained.act_postprocess1.4" in name: _snake_case : Dict = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" ) if "pretrained.act_postprocess2.3" in name: _snake_case : List[str] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" ) if "pretrained.act_postprocess2.4" in name: _snake_case : List[str] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" ) if "pretrained.act_postprocess3.3" in name: _snake_case : int = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" ) if "pretrained.act_postprocess4.3" in name: _snake_case : Optional[int] = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" ) if "pretrained.act_postprocess4.4" in name: _snake_case : Optional[int] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" ) if "pretrained" in name: _snake_case : Dict = name.replace("pretrained" , "dpt" ) if 
"bn" in name: _snake_case : Dict = name.replace("bn" , "batch_norm" ) if "head" in name: _snake_case : Optional[Any] = name.replace("head" , "head.head" ) if "encoder.norm" in name: _snake_case : List[str] = name.replace("encoder.norm" , "layernorm" ) if "auxlayer" in name: _snake_case : Dict = name.replace("auxlayer" , "auxiliary_head.head" ) return name def a__ ( a : Optional[int] , a : Optional[int] ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case : List[str] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) _snake_case : Tuple = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Optional[int] = in_proj_weight[: config.hidden_size, :] _snake_case : int = in_proj_bias[: config.hidden_size] _snake_case : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case : Dict = in_proj_weight[ -config.hidden_size :, : ] _snake_case : int = in_proj_bias[-config.hidden_size :] def a__ ( ): """simple docstring""" _snake_case : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' _snake_case : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def a__ ( a : List[str] , a : Optional[Any] , a : List[str] , a : List[Any] ): """simple docstring""" _snake_case : Dict = get_dpt_config(_lowerCamelCase ) # load original state_dict from URL _snake_case : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" ) # remove certain keys remove_ignore_keys_(_lowerCamelCase ) # rename keys for key in state_dict.copy().keys(): _snake_case : Dict = state_dict.pop(_lowerCamelCase ) _snake_case : Optional[int] = val # read in qkv matrices read_in_q_k_v(_lowerCamelCase , 
_lowerCamelCase ) # load HuggingFace model _snake_case : Union[str, Any] = DPTForSemanticSegmentation(_lowerCamelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() # Check outputs on an image _snake_case : Union[str, Any] = 480 if 'ade' in checkpoint_url else 384 _snake_case : str = DPTImageProcessor(size=_lowerCamelCase ) _snake_case : Any = prepare_img() _snake_case : Tuple = image_processor(_lowerCamelCase , return_tensors="pt" ) # forward pass _snake_case : Union[str, Any] = model(**_lowerCamelCase ).logits if 'ade' in checkpoint_url else model(**_lowerCamelCase ).predicted_depth # Assert logits _snake_case : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: _snake_case : int = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(_lowerCamelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase ) ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(_lowerCamelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print("Pushing model to hub..." 
) model.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , ) if __name__ == "__main__": _a : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) _a : Tuple = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
716
"""Lazy import structure for the BARTpho tokenizer package.

Fix: the import-structure dict and the lazy-module assignment were bound to
throwaway names, leaving `_import_structure` undefined at the `_LazyModule`
call (NameError on import).
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Submodule name -> list of public symbols; consumed by _LazyModule below.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional: without it the tokenizer is simply unavailable.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy import happens on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""Distributed smoke test for `datasets.distributed.split_dataset_by_node`.

Fix: the generator's parameter, the shard-count constants, and the failure
exception were all bound to throwaway names, so `shards`, `NUM_SHARDS`,
`NUM_ITEMS_PER_SHARD` and `FailedTestError` were undefined at use sites.
"""
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when the per-rank item count does not match the expected split size."""


def gen(shards: List[str]):
    """Yield NUM_ITEMS_PER_SHARD items per shard, tagging each with its shard name."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    """Build a sharded dataset, split it by node, and verify this rank's item count."""
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        # Materialize the stream so the non-streaming code path is exercised too.
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Ranks below (full_size % world_size) receive one extra item.
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
717
"""Recursive two-ended linear search.

Fix: the signature declared four parameters all named `a` (a SyntaxError) and
the body referenced the undefined names `list_data`, `key`, `left`, `right`
and `search`. Restored a consistent signature matching the body.
"""


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Return the index of ``key`` in ``list_data`` or -1 if absent.

    Checks both ends of the current window and recurses inward.
    ``right == 0`` is treated as "search the whole list".
    # NOTE(review): because 0 is falsy, an explicit right=0 also expands to
    # the full list — preserved as-is; confirm callers never rely on right=0
    # meaning "index 0 only".
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1  # window exhausted (also handles the empty-list case)
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
0
"""Project-Euler-style perfect-partition proportion search.

Fix: both functions were named `a__` (the second shadowed the first, making
the call to `check_partition_perfect` a NameError) and the bodies referenced
the undefined name `a_`. Restored internally consistent names.
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the partition derived from ``positive_integer`` is perfect.

    Perfect here means sqrt(4*n + 1)/2 + 1/2 is an exact power of two
    (its base-2 logarithm is an integer).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the smallest integer whose perfect/total partition ratio drops below ``max_proportion``."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # If candidate is an integer, then there is a partition for k.
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
718
"""Boyer–Moore-style pattern location using the bad character heuristic.

Fix: the class was named `_UpperCAmelCase` while the driver script
instantiates `BoyerMooreSearch`, and all three methods were named
`lowerCamelCase__` (later defs silently shadowing earlier ones). Restored
the names the driver and the methods' own call sites require.
"""
from __future__ import annotations


class BoyerMooreSearch:
    """Find every start position at which ``pattern`` occurs in ``text``."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Index of the right-most occurrence of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Right-most mismatching text index for the window at ``current_pos`` (-1 = full match)."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Scan every window and return the start positions of full matches."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Classic bad-character shift; this exhaustive scan does not
                # consume it, but it is kept to mirror the algorithm.
                i = mismatch_index - match_index  # noqa: PLW2901
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
87
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _a : List[str] = logging.get_logger(__name__) _a : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _a : Optional[int] = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( 
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } _a : Any = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } _a : Optional[int] = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': {'''do_lower_case''': True}, } class _UpperCAmelCase ( _UpperCAmelCase): __lowercase : Any = VOCAB_FILES_NAMES __lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION __lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : 
List[Any] = RealmTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ): super().__init__( __UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , ) _snake_case : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __UpperCamelCase ) != tokenize_chinese_chars ): _snake_case : Tuple = getattr(__UpperCamelCase , normalizer_state.pop("type" ) ) _snake_case : Optional[int] = do_lower_case _snake_case : Optional[Any] = strip_accents _snake_case : Optional[int] = tokenize_chinese_chars _snake_case : List[str] = normalizer_class(**__UpperCamelCase ) _snake_case : Optional[Any] = do_lower_case def lowerCamelCase__ ( self , snake_case_ , **snake_case_ ): _snake_case : Optional[Any] = PaddingStrategy.MAX_LENGTH _snake_case : Optional[Any] = text _snake_case : Dict = kwargs.pop("text_pair" , __UpperCamelCase ) _snake_case : int = kwargs.pop("return_tensors" , __UpperCamelCase ) _snake_case : List[Any] = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(__UpperCamelCase ): if batch_text_pair is not None: _snake_case : Dict = batch_text_pair[idx] else: _snake_case : int = None _snake_case : Tuple = super().__call__(__UpperCamelCase , __UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) _snake_case : Optional[int] = 
encoded_candidates.get("input_ids" ) _snake_case : List[Any] = encoded_candidates.get("attention_mask" ) _snake_case : int = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(__UpperCamelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(__UpperCamelCase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__UpperCamelCase ) _snake_case : Union[str, Any] = {key: item for key, item in output_data.items() if len(__UpperCamelCase ) != 0} return BatchEncoding(__UpperCamelCase , tensor_type=__UpperCamelCase ) def lowerCamelCase__ ( self , snake_case_ , snake_case_=None ): _snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Any = [self.sep_token_id] _snake_case : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase )
719
"""Download the image referenced by a page's ``og:image`` meta tag.

Fix: BeautifulSoup was imported from the nonexistent package ``bsa``;
the package is ``bs4``.
"""
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image.
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    # Timestamped filename avoids clobbering earlier downloads.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
87
0
"""Min-heap over Node objects with O(1) index lookup and decrease-key.

Fix: both classes were named `_UpperCAmelCase` and every method was named
`lowerCamelCase__`, while the driver script references `Node`, `MinHeap` and
`my_min_heap` — all undefined. Restored consistent names.
"""


class Node:
    """A named value; ordering is by ``val`` only."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap; tracks each node's index for decrease_key."""

    def __init__(self, array):
        self.idx_of_element = {}  # Node -> its index in self.heap
        self.heap_dict = {}  # node name -> node value
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Push array[idx] down until both children are >= it."""
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)
            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right
            if smallest == idx:
                break
            array[idx], array[smallest] = array[smallest], array[idx]
            self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest

    def sift_up(self, idx):
        """Bubble self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node, restoring the heap property."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add ``node`` and bubble it into position."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower ``node``'s value to ``new_value`` (must be strictly smaller)."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
"""Lazy import structure for the Pix2Struct model package.

Fix: the import-structure dict and its conditional additions were bound to
throwaway names, leaving `_import_structure` undefined at the `_LazyModule`
call, and the TYPE_CHECKING imports targeted nonexistent "pixastruct"
modules; aligned them with the pix2struct submodule names used as dict keys.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""Fast (Rust-backed) tokenizer for HerBERT.

Fix: class, class-attribute, constant and parameter names were corrupted
(`_UpperCAmelCase`, `__lowercase`, `_a`, repeated `snake_case_`/`_lowercase`),
and `get_special_tokens_mask` forwarded the undefined `_lowercase` instead of
`already_has_special_tokens=True`. Restored consistent names.
"""
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer for HerBERT, delegating heavy lifting to the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap one or two sequences with [CLS]/[SEP]: <s> A </s> (B </s>)."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask where 1 marks special tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: all 0 for a single sequence, 0s then 1s for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
721
"""Report GitHub Actions self-hosted runners that are offline.

Fix: duplicate parameter names `a` (SyntaxError) and throwaway locals left
`token`, `offline_runners`, `failed`, `shell=True` and the parsed JSON
undefined at their use sites. Restored consistent names.
"""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """Query the GitHub API for runner status; raise if any target runner is offline.

    Also writes the offline list to ``offline_runners.txt`` for Slack reporting.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
87
0
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase__ ( *snake_case_ , **snake_case_ ): pass @is_pipeline_test @require_vision class _UpperCAmelCase ( unittest.TestCase): @require_torch def lowerCamelCase__ ( self ): _snake_case : Any = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) _snake_case : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _snake_case : Tuple = image_classifier(A_ , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(A_ ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) _snake_case : Optional[Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], ] , ) @require_tf def lowerCamelCase__ ( self 
): _snake_case : str = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) _snake_case : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _snake_case : List[Any] = image_classifier(A_ , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(A_ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) _snake_case : int = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], [ {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, {"score": 0.333, "label": ANY(A_ )}, ], ] , ) @slow @require_torch def lowerCamelCase__ ( self ): _snake_case : str = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes _snake_case : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _snake_case : Tuple = image_classifier(A_ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(A_ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) _snake_case : Tuple = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, 
"label": "plane"}, ], ] * 5 , ) @slow @require_tf def lowerCamelCase__ ( self ): _snake_case : Tuple = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes _snake_case : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _snake_case : int = image_classifier(A_ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(A_ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) _snake_case : Tuple = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(A_ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
700
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _snake_case : List[Any] = Vector() def lowerCamelCase__ ( self ): _snake_case : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 2, 3, 4] ) self.assertEqual(len(snake_case_ ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2] ) _snake_case : List[str] = Vector([1, 2, 3, 4, 5] ) _snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) _snake_case : Any = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : str = Vector([1, 2, 3] ) _snake_case : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Vector([1, 2, 3] ) _snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product _snake_case : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase__ ( self ): self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 ) def lowerCamelCase__ ( self ): 
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Vector([1, 2, 3] ) _snake_case : Optional[Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] ) _snake_case : Optional[int] = x.copy() self.assertEqual(str(snake_case_ ) , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(snake_case_ ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _snake_case : List[str] = Vector([1, 2, 3] ) self.assertEqual("(14,32,50)" , str(a * x ) ) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , 
str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) ) def lowerCamelCase__ ( self ): self.assertEqual( "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
87
0
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _UpperCAmelCase ( lowercase__): __lowercase : torch.FloatTensor class _UpperCAmelCase ( lowercase__ , lowercase__): @register_to_config def __init__( self , snake_case_ = 32 , snake_case_ = 64 , snake_case_ = 20 , snake_case_ = 7_68 , snake_case_=77 , snake_case_=4 , snake_case_ = 0.0 , snake_case_ = "silu" , snake_case_ = None , snake_case_ = None , snake_case_ = "linear" , snake_case_ = "prd" , snake_case_ = None , snake_case_ = None , snake_case_ = None , ): super().__init__() _snake_case : str = num_attention_heads _snake_case : Dict = attention_head_dim _snake_case : Tuple = num_attention_heads * attention_head_dim _snake_case : Dict = additional_embeddings _snake_case : List[str] = time_embed_dim or inner_dim _snake_case : str = embedding_proj_dim or embedding_dim _snake_case : Any = clip_embed_dim or embedding_dim _snake_case : Any = Timesteps(snake_case_ , snake_case_ , 0 ) _snake_case : int = TimestepEmbedding(snake_case_ , snake_case_ , out_dim=snake_case_ , act_fn=snake_case_ ) _snake_case : Optional[int] = nn.Linear(snake_case_ , snake_case_ ) if embedding_proj_norm_type is None: _snake_case : Union[str, Any] = None elif embedding_proj_norm_type == "layer": _snake_case : Any = nn.LayerNorm(snake_case_ ) else: raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' ) _snake_case : Union[str, Any] = nn.Linear(snake_case_ , snake_case_ ) if encoder_hid_proj_type is None: _snake_case : Tuple = None elif encoder_hid_proj_type == "linear": _snake_case : List[str] = 
nn.Linear(snake_case_ , snake_case_ ) else: raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' ) _snake_case : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case_ ) ) if added_emb_type == "prd": _snake_case : Tuple = nn.Parameter(torch.zeros(1 , 1 , snake_case_ ) ) elif added_emb_type is None: _snake_case : Tuple = None else: raise ValueError( F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' ) _snake_case : Dict = nn.ModuleList( [ BasicTransformerBlock( snake_case_ , snake_case_ , snake_case_ , dropout=snake_case_ , activation_fn="gelu" , attention_bias=snake_case_ , ) for d in range(snake_case_ ) ] ) if norm_in_type == "layer": _snake_case : int = nn.LayerNorm(snake_case_ ) elif norm_in_type is None: _snake_case : int = None else: raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.' ) _snake_case : Dict = nn.LayerNorm(snake_case_ ) _snake_case : List[str] = nn.Linear(snake_case_ , snake_case_ ) _snake_case : Union[str, Any] = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) _snake_case : Optional[Any] = causal_attention_mask[None, ...] 
self.register_buffer("causal_attention_mask" , snake_case_ , persistent=snake_case_ ) _snake_case : Union[str, Any] = nn.Parameter(torch.zeros(1 , snake_case_ ) ) _snake_case : Union[str, Any] = nn.Parameter(torch.zeros(1 , snake_case_ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase__ ( self ): _snake_case : Optional[int] = {} def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ): if hasattr(snake_case_ , "set_processor" ): _snake_case : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'{name}.{sub_name}' , snake_case_ , snake_case_ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ) return processors def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Union[str, Any] = len(self.attn_processors.keys() ) if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count: raise ValueError( F'A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the' F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' 
) def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ): if hasattr(snake_case_ , "set_processor" ): if not isinstance(snake_case_ , snake_case_ ): module.set_processor(snake_case_ ) else: module.set_processor(processor.pop(F'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'{name}.{sub_name}' , snake_case_ , snake_case_ ) for name, module in self.named_children(): fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase__ ( self ): self.set_attn_processor(AttnProcessor() ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = True , ): _snake_case : str = hidden_states.shape[0] _snake_case : Tuple = timestep if not torch.is_tensor(snake_case_ ): _snake_case : Any = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0: _snake_case : List[Any] = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML _snake_case : Any = timesteps * torch.ones(snake_case_ , dtype=timesteps.dtype , device=timesteps.device ) _snake_case : List[str] = self.time_proj(snake_case_ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
_snake_case : Union[str, Any] = timesteps_projected.to(dtype=self.dtype ) _snake_case : Dict = self.time_embedding(snake_case_ ) if self.embedding_proj_norm is not None: _snake_case : List[str] = self.embedding_proj_norm(snake_case_ ) _snake_case : Union[str, Any] = self.embedding_proj(snake_case_ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: _snake_case : Optional[Any] = self.encoder_hidden_states_proj(snake_case_ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" ) _snake_case : Dict = self.proj_in(snake_case_ ) _snake_case : str = self.positional_embedding.to(hidden_states.dtype ) _snake_case : Tuple = [] _snake_case : Any = 0 if encoder_hidden_states is not None: additional_embeds.append(snake_case_ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: _snake_case : Any = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: _snake_case : Optional[int] = hidden_states[:, None, :] _snake_case : Optional[int] = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: _snake_case : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case_ , -1 , -1 ) additional_embeds.append(snake_case_ ) _snake_case : str = torch.cat( snake_case_ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens _snake_case : int = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: _snake_case : Dict = F.pad( snake_case_ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) _snake_case : List[Any] = hidden_states + positional_embeddings if attention_mask is not None: 
_snake_case : Optional[int] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 _snake_case : Optional[int] = F.pad(snake_case_ , (0, self.additional_embeddings) , value=0.0 ) _snake_case : Union[str, Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) _snake_case : Any = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: _snake_case : Tuple = self.norm_in(snake_case_ ) for block in self.transformer_blocks: _snake_case : str = block(snake_case_ , attention_mask=snake_case_ ) _snake_case : List[Any] = self.norm_out(snake_case_ ) if self.prd_embedding is not None: _snake_case : Dict = hidden_states[:, -1] else: _snake_case : Tuple = hidden_states[:, additional_embeddings_len:] _snake_case : Tuple = self.proj_to_clip_embeddings(snake_case_ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Union[str, Any] = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
701
"""simple docstring""" from __future__ import annotations from collections import namedtuple def a__ ( a : float , a : float , a : float ): """simple docstring""" _snake_case : Optional[Any] = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : int = logging.get_logger(__name__) _a : List[Any] = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """resnet""" __lowercase : List[str] = ["""basic""", """bottleneck"""] def __init__( self , snake_case_=3 , snake_case_=64 , snake_case_=[2_56, 5_12, 10_24, 20_48] , snake_case_=[3, 4, 6, 3] , snake_case_="bottleneck" , snake_case_="relu" , snake_case_=False , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) _snake_case : Any = num_channels _snake_case : List[str] = embedding_size _snake_case : Union[str, Any] = hidden_sizes _snake_case : int = depths _snake_case : Union[str, Any] = layer_type _snake_case : Union[str, Any] = hidden_act _snake_case : Tuple = downsample_in_first_stage _snake_case : List[str] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(snake_case_ ) + 1 )] _snake_case , _snake_case : str = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names ) class _UpperCAmelCase ( _snake_case): __lowercase : Dict = version.parse("""1.11""") @property def lowerCamelCase__ ( self ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCamelCase__ ( self ): return 1E-3
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
"""simple docstring"""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1_337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1_337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation.

    BUG FIX: the obfuscated version compared ``len(snake_case_)`` to itself
    (undefined name, tautological), assigned attribute updates to a throwaway
    local, and gave both tests the same non-``test_``-prefixed name so pytest
    never collected them.
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    # One YAML entry per split.
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict(split_info: SplitInfo):
    """`dataset_name` must survive asdict() even though it is deprecated."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
703
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _snake_case): __lowercase : int = """EncodecFeatureExtractor""" __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , snake_case_ , snake_case_ ): super().__init__(snake_case_ , snake_case_ ) _snake_case : Dict = self.feature_extractor _snake_case : Any = False def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ): return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ ) def __call__( self , *snake_case_ , **snake_case_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*snake_case_ , **snake_case_ ) _snake_case : str = kwargs.pop("audio" , snake_case_ ) _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ ) _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Any = args[0] _snake_case : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if text is not None: _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ ) if audio is not None: _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ ) if audio is None: return inputs elif text is None: return audio_inputs else: _snake_case : str = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _snake_case : List[str] = audio_inputs["padding_mask"] return inputs def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): _snake_case : Tuple = kwargs.pop("audio" , snake_case_ ) _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Tuple = args[0] _snake_case : Dict = args[1:] if audio_values is not None: return self._decode_audio(snake_case_ , padding_mask=snake_case_ ) else: return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[int] = to_numpy(snake_case_ ) _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape if padding_mask is None: return list(snake_case_ ) _snake_case : Optional[int] = to_numpy(snake_case_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _snake_case : Any = seq_len - padding_mask.shape[-1] _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ ) _snake_case : Any = audio_values.tolist() for i in range(snake_case_ ): _snake_case : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _snake_case : Tuple = sliced_audio.reshape(snake_case_ 
, -1 ) return audio_values
87
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _a : Tuple = { "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], "configuration_data2vec_text": [ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = [ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", "Data2VecAudioForXVector", "Data2VecAudioModel", "Data2VecAudioPreTrainedModel", ] _a : Dict = [ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ] _a : Union[str, Any] = [ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", "Data2VecVisionModel", "Data2VecVisionPreTrainedModel", ] if is_tf_available(): _a : Union[str, Any] = [ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( 
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys _a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
704
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy import structure: submodule name -> list of public symbols it provides.
_a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras are optional; silently skip the image-processing exports.
    pass
else:
    # NOTE(review): these rebind the same name `_a` instead of adding keys to
    # an import-structure dict — looks like mangled generator output; verify
    # against the upstream `_import_structure["feature_extraction_yolos"]` form.
    _a : Optional[Any] = ["""YolosFeatureExtractor"""]
    _a : List[Any] = ["""YolosImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Union[str, Any] = [
        """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """YolosForObjectDetection""",
        """YolosModel""",
        """YolosPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    # At runtime the module object is replaced by a lazy loader.
    # NOTE(review): `_import_structure` is undefined here (the dict above was
    # bound to `_a`) — presumably another artifact of the renaming; confirm.
    _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid constants (metres).
AXIS_A = 6_378_137.0        # equatorial semi-major axis
AXIS_B = 6_356_752.314_245  # polar semi-minor axis
RADIUS = 6_378_137          # sphere radius used by the haversine formula


def a__(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Haversine great-circle distance, in metres, between two lat/lon points.

    Latitudes are first reduced to the WGS-84 auxiliary sphere via the
    flattening factor, then the classic haversine formula is applied.

    BUG FIX: the obfuscated version named all four parameters `a` (a
    SyntaxError), collapsed the three module constants to `_a` (leaving
    AXIS_A/AXIS_B/RADIUS undefined), and bound every intermediate to
    `_snake_case` while the formula read distinct names (so `phi_a - phi_a`
    was identically zero). Distinct names are restored here.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced (parametric) latitudes on the auxiliary sphere.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation.
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values.
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those 
are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
0
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _UpperCAmelCase ( yaml.SafeLoader): def lowerCamelCase__ ( self , snake_case_ ): _snake_case : str = [self.constructed_objects[key_node] for key_node, _ in node.value] _snake_case : Any = [tuple(a_ ) if isinstance(a_ , a_ ) else key for key in keys] _snake_case : Optional[int] = Counter(a_ ) _snake_case : Dict = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' ) def lowerCamelCase__ ( self , snake_case_ , snake_case_=False ): _snake_case : Any = super().construct_mapping(a_ , deep=a_ ) self._check_no_duplicates_on_constructed_node(a_ ) return mapping def a__ ( a : str ): """simple docstring""" _snake_case : List[str] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: _snake_case : List[Any] = full_content[1:].index("---" ) + 1 _snake_case : Dict = """\n""".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(snake_case__ ) class _UpperCAmelCase ( __a): __lowercase : Optional[Any] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def lowerCamelCase__ ( cls , snake_case_ ): with open(a_ , encoding="utf-8" ) as readme_file: _snake_case : List[Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(a_ ) else: return cls() def lowerCamelCase__ ( self , snake_case_ ): if path.exists(): with open(a_ , encoding="utf-8" ) as readme_file: _snake_case : List[str] = readme_file.read() else: _snake_case : Dict = None _snake_case : List[str] = self._to_readme(a_ ) with open(a_ , "w" , encoding="utf-8" ) as readme_file: readme_file.write(a_ ) def lowerCamelCase__ ( self , snake_case_ = None ): if readme_content is not None: _snake_case : Union[str, Any] = 
_split_yaml_from_readme(a_ ) _snake_case : str = """---\n""" + self.to_yaml_string() + """---\n""" + content else: _snake_case : Any = """---\n""" + self.to_yaml_string() + """---\n""" return full_content @classmethod def lowerCamelCase__ ( cls , snake_case_ ): _snake_case : int = yaml.load(a_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields _snake_case : Tuple = { (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**a_ ) def lowerCamelCase__ ( self ): return yaml.safe_dump( { (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=a_ , allow_unicode=a_ , encoding="utf-8" , ).decode("utf-8" ) lowerCamelCase_ : Dict = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse 
import ArgumentParser lowerCamelCase_ : str = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") lowerCamelCase_ : Optional[int] = ap.parse_args() lowerCamelCase_ : List[Any] = Path(args.readme_filepath) lowerCamelCase_ : str = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
706
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py _a : Optional[int] = """src/diffusers""" # Matches is_xxx_available() _a : Dict = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla _a : List[str] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") _a : str = """\n{0} = None\n""" _a : Tuple = """\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n""" _a : Tuple = """\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n""" def a__ ( a : Optional[int] ): """simple docstring""" _snake_case : int = _re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a__ ( ): """simple docstring""" with open(os.path.join(__snake_case , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f: _snake_case : List[Any] = f.readlines() # Get to the point we do the actual imports for type checking _snake_case : Union[str, Any] = 0 _snake_case : Optional[Any] = {} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _snake_case : int = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("else:" ): line_index += 1 line_index += 1 _snake_case : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: _snake_case : str = lines[line_index] _snake_case : str = _re_single_line_import.search(__snake_case ) if single_line_import_search is not None: 
objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: _snake_case : List[str] = objects else: line_index += 1 return backend_specific_objects def a__ ( a : Dict , a : Optional[int] ): """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a__ ( a : Union[str, Any]=None ): """simple docstring""" if backend_specific_objects is None: _snake_case : List[Any] = read_init() # For special correspondence backend to module name as used in the function requires_modulename _snake_case : Optional[Any] = {} for backend, objects in backend_specific_objects.items(): _snake_case : Any = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_" ) ) + "]" _snake_case : Any = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) _snake_case : List[Any] = dummy_file return dummy_files def a__ ( a : List[Any]=False ): """simple docstring""" _snake_case : Tuple = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _snake_case : Any = {"torch": "pt"} # Locate actual dummy modules and read their content. 
_snake_case : Optional[Any] = os.path.join(__snake_case , "utils" ) _snake_case : Tuple = { backend: os.path.join(__snake_case , f'dummy_{short_names.get(__snake_case , __snake_case )}_objects.py' ) for backend in dummy_files.keys() } _snake_case : List[Any] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f: _snake_case : int = f.read() else: _snake_case : List[str] = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ' "__init__ has new objects." ) with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( "The main __init__ has objects that are not present in " f'diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ' "to fix this." ) if __name__ == "__main__": _a : Any = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _a : Optional[Any] = parser.parse_args() check_dummies(args.fix_and_overwrite)
707
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
0
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def a__ ( ): """simple docstring""" _snake_case : int = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=UpperCAmelCase__ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=UpperCAmelCase__ , default=5 ) parser.add_argument("--batch_size" , type=UpperCAmelCase__ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=UpperCAmelCase__ , default=1 ) parser.add_argument("--freeze" , type=UpperCAmelCase__ , default=UpperCAmelCase__ ) parser.add_argument("--learning_rate" , type=UpperCAmelCase__ , default=5e-4 ) parser.add_argument("--seed" , type=UpperCAmelCase__ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=UpperCAmelCase__ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=UpperCAmelCase__ , default=10 ) parser.add_argument("--weight_decay" , type=UpperCAmelCase__ , default=0.01 ) parser.add_argument("--output_dir" , type=UpperCAmelCase__ , default="./results" ) return parser.parse_args() _a : Any = load("""accuracy""") def a__ ( a : Optional[Any] ): """simple docstring""" _snake_case : str = eval_pred _snake_case : Union[str, Any] = np.argmax(UpperCAmelCase__ , axis=1 ) return metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ ) class _UpperCAmelCase ( _UpperCAmelCase): def __init__( self , snake_case_ ): super().__init__() _snake_case : List[Any] = trainer def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ): if control.should_evaluate: _snake_case : Union[str, Any] = deepcopy(lowercase_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) 
return control_copy def a__ ( ): """simple docstring""" _snake_case : Union[str, Any] = get_args() set_seed(args.seed ) _snake_case : Union[str, Any] = load_dataset("codeparrot/codecomplex" , split="train" ) _snake_case : Any = dataset.train_test_split(test_size=0.2 ) _snake_case : Any = train_test["""test"""].train_test_split(test_size=0.5 ) _snake_case : str = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) _snake_case : int = AutoTokenizer.from_pretrained(args.model_ckpt ) _snake_case : Optional[int] = tokenizer.eos_token _snake_case : Any = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _snake_case : Optional[Any] = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _snake_case : Tuple = False _snake_case : Union[str, Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(a : Optional[Any] ): _snake_case : Any = tokenizer(example["src"] , truncation=UpperCAmelCase__ , max_length=1_024 ) _snake_case : int = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _snake_case : List[str] = train_test_validation.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=train_test_validation["train"].column_names , ) _snake_case : int = DataCollatorWithPadding(tokenizer=UpperCAmelCase__ ) _snake_case : Dict = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , 
metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) _snake_case : List[Any] = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , ) print("Training..." ) trainer.add_callback(CustomCallback(UpperCAmelCase__ ) ) trainer.train() if __name__ == "__main__": main()
708
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
87
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class _UpperCAmelCase ( a__): __lowercase : torch.FloatTensor class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_=3 , snake_case_=3 , snake_case_=("DownEncoderBlock2D",) , snake_case_=(64,) , snake_case_=2 , snake_case_=32 , snake_case_="silu" , snake_case_=True , ): super().__init__() _snake_case : Tuple = layers_per_block _snake_case : List[str] = torch.nn.Convad( lowerCAmelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) _snake_case : str = None _snake_case : Any = nn.ModuleList([] ) # down _snake_case : Tuple = block_out_channels[0] for i, down_block_type in enumerate(lowerCAmelCase__ ): _snake_case : Dict = output_channel _snake_case : str = block_out_channels[i] _snake_case : int = i == len(lowerCAmelCase__ ) - 1 _snake_case : List[Any] = get_down_block( lowerCAmelCase__ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , ) self.down_blocks.append(lowerCAmelCase__ ) # mid _snake_case : str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , ) # out _snake_case : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase__ , eps=1E-6 ) _snake_case : Dict = nn.SiLU() _snake_case : Union[str, 
Any] = 2 * out_channels if double_z else out_channels _snake_case : List[Any] = nn.Convad(block_out_channels[-1] , lowerCAmelCase__ , 3 , padding=1 ) _snake_case : List[str] = False def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Optional[Any] = x _snake_case : Dict = self.conv_in(lowerCAmelCase__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case_ ): def custom_forward(*snake_case_ ): return module(*lowerCAmelCase__ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: _snake_case : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ ) # middle _snake_case : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ ) else: for down_block in self.down_blocks: _snake_case : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ ) # middle _snake_case : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase__ ) else: # down for down_block in self.down_blocks: _snake_case : Optional[int] = down_block(lowerCAmelCase__ ) # middle _snake_case : int = self.mid_block(lowerCAmelCase__ ) # post-process _snake_case : Tuple = self.conv_norm_out(lowerCAmelCase__ ) _snake_case : int = self.conv_act(lowerCAmelCase__ ) _snake_case : str = self.conv_out(lowerCAmelCase__ ) return sample class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_=3 , snake_case_=3 , snake_case_=("UpDecoderBlock2D",) , snake_case_=(64,) , snake_case_=2 , snake_case_=32 , snake_case_="silu" , snake_case_="group" , ): super().__init__() _snake_case : List[str] = layers_per_block _snake_case : Optional[int] = nn.Convad( lowerCAmelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) _snake_case : int = None 
_snake_case : Union[str, Any] = nn.ModuleList([] ) _snake_case : Optional[int] = in_channels if norm_type == "spatial" else None # mid _snake_case : Any = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , ) # up _snake_case : List[str] = list(reversed(lowerCAmelCase__ ) ) _snake_case : List[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCAmelCase__ ): _snake_case : str = output_channel _snake_case : Tuple = reversed_block_out_channels[i] _snake_case : Any = i == len(lowerCAmelCase__ ) - 1 _snake_case : str = get_up_block( lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , resnet_time_scale_shift=lowerCAmelCase__ , ) self.up_blocks.append(lowerCAmelCase__ ) _snake_case : List[str] = output_channel # out if norm_type == "spatial": _snake_case : Optional[Any] = SpatialNorm(block_out_channels[0] , lowerCAmelCase__ ) else: _snake_case : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase__ , eps=1E-6 ) _snake_case : int = nn.SiLU() _snake_case : Union[str, Any] = nn.Convad(block_out_channels[0] , lowerCAmelCase__ , 3 , padding=1 ) _snake_case : int = False def lowerCamelCase__ ( self , snake_case_ , snake_case_=None ): _snake_case : Any = z _snake_case : Union[str, Any] = self.conv_in(lowerCAmelCase__ ) _snake_case : Any = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def 
create_custom_forward(snake_case_ ): def custom_forward(*snake_case_ ): return module(*lowerCAmelCase__ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle _snake_case : Optional[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ ) _snake_case : List[Any] = sample.to(lowerCAmelCase__ ) # up for up_block in self.up_blocks: _snake_case : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ ) else: # middle _snake_case : Optional[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ ) _snake_case : int = sample.to(lowerCAmelCase__ ) # up for up_block in self.up_blocks: _snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ ) else: # middle _snake_case : List[Any] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ ) _snake_case : int = sample.to(lowerCAmelCase__ ) # up for up_block in self.up_blocks: _snake_case : Optional[Any] = up_block(lowerCAmelCase__ , lowerCAmelCase__ ) # post-process if latent_embeds is None: _snake_case : str = self.conv_norm_out(lowerCAmelCase__ ) else: _snake_case : Optional[int] = self.conv_norm_out(lowerCAmelCase__ , lowerCAmelCase__ ) _snake_case : Any = self.conv_act(lowerCAmelCase__ ) _snake_case : Tuple = self.conv_out(lowerCAmelCase__ ) return sample class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_="random" , snake_case_=False , snake_case_=True ): super().__init__() _snake_case : Union[str, Any] = n_e _snake_case : Optional[Any] = vq_embed_dim _snake_case : int = beta _snake_case : Any = legacy _snake_case : str = nn.Embedding(self.n_e , self.vq_embed_dim ) 
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) _snake_case : str = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) _snake_case : List[Any] = self.used.shape[0] _snake_case : Optional[int] = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": _snake_case : List[str] = self.re_embed _snake_case : List[str] = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' ) else: _snake_case : str = n_e _snake_case : Union[str, Any] = sane_index_shape def lowerCamelCase__ ( self , snake_case_ ): _snake_case : List[Any] = inds.shape assert len(lowerCAmelCase__ ) > 1 _snake_case : Union[str, Any] = inds.reshape(ishape[0] , -1 ) _snake_case : Optional[Any] = self.used.to(lowerCAmelCase__ ) _snake_case : List[str] = (inds[:, :, None] == used[None, None, ...]).long() _snake_case : str = match.argmax(-1 ) _snake_case : int = match.sum(2 ) < 1 if self.unknown_index == "random": _snake_case : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: _snake_case : Optional[Any] = self.unknown_index return new.reshape(lowerCAmelCase__ ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : str = inds.shape assert len(lowerCAmelCase__ ) > 1 _snake_case : Tuple = inds.reshape(ishape[0] , -1 ) _snake_case : Dict = self.used.to(lowerCAmelCase__ ) if self.re_embed > self.used.shape[0]: # extra token _snake_case : Optional[Any] = 0 # simply set to zero _snake_case : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase__ ) return back.reshape(lowerCAmelCase__ ) def lowerCamelCase__ ( self , snake_case_ ): # reshape z -> (batch, height, width, channel) and flatten _snake_case : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous() _snake_case : Optional[int] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z 
- e)^2 = z^2 + e^2 - 2 e * z _snake_case : Tuple = torch.argmin(torch.cdist(lowerCAmelCase__ , self.embedding.weight ) , dim=1 ) _snake_case : Dict = self.embedding(lowerCAmelCase__ ).view(z.shape ) _snake_case : Dict = None _snake_case : Tuple = None # compute loss for embedding if not self.legacy: _snake_case : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: _snake_case : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients _snake_case : Optional[int] = z + (z_q - z).detach() # reshape back to match original input shape _snake_case : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: _snake_case : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis _snake_case : Optional[Any] = self.remap_to_used(lowerCAmelCase__ ) _snake_case : List[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: _snake_case : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): # shape specifying (batch, height, width, channel) if self.remap is not None: _snake_case : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis _snake_case : Optional[Any] = self.unmap_to_all(lowerCAmelCase__ ) _snake_case : Any = indices.reshape(-1 ) # flatten again # get quantized latent vectors _snake_case : str = self.embedding(lowerCAmelCase__ ) if shape is not None: _snake_case : List[str] = z_q.view(lowerCAmelCase__ ) # reshape back to match original input shape _snake_case : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class _UpperCAmelCase ( a__): def __init__( self , snake_case_ , snake_case_=False ): _snake_case : Any = parameters _snake_case : Tuple = torch.chunk(lowerCAmelCase__ , 2 , dim=1 ) 
_snake_case : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) _snake_case : int = deterministic _snake_case : Optional[Any] = torch.exp(0.5 * self.logvar ) _snake_case : Optional[int] = torch.exp(self.logvar ) if self.deterministic: _snake_case : int = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def lowerCamelCase__ ( self , snake_case_ = None ): # make sure sample is on the same device as the parameters and has same dtype _snake_case : List[str] = randn_tensor( self.mean.shape , generator=lowerCAmelCase__ , device=self.parameters.device , dtype=self.parameters.dtype ) _snake_case : List[str] = self.mean + self.std * sample return x def lowerCamelCase__ ( self , snake_case_=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) _snake_case : List[str] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase__ ) def lowerCamelCase__ ( self ): return self.mean
709
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( _snake_case): __lowercase : Optional[Any] = """openai-gpt""" __lowercase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Dict = n_positions _snake_case : Any = n_embd _snake_case : Any = n_layer _snake_case : Optional[int] = n_head _snake_case : Union[str, Any] = afn _snake_case : Dict = resid_pdrop _snake_case : str = embd_pdrop _snake_case : Union[str, Any] = attn_pdrop _snake_case : str = layer_norm_epsilon _snake_case : Union[str, Any] = initializer_range _snake_case : Any = summary_type _snake_case : List[str] = summary_use_proj _snake_case : Optional[int] = summary_activation _snake_case : Union[str, Any] = summary_first_dropout _snake_case : Optional[int] = summary_proj_to_labels super().__init__(**snake_case_ )
87
0
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class _UpperCAmelCase ( nn.Module): __lowercase : List[Any] = 4_2 __lowercase : Dict = jnp.floataa def lowerCamelCase__ ( self ): _snake_case : int = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , snake_case_ ): _snake_case , _snake_case , _snake_case , _snake_case : str = hidden_states.shape _snake_case : List[str] = jax.image.resize( __lowerCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) _snake_case : str = self.conv(__lowerCAmelCase ) return hidden_states class _UpperCAmelCase ( nn.Module): __lowercase : Dict = 4_2 __lowercase : List[str] = jnp.floataa def lowerCamelCase__ ( self ): _snake_case : List[str] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , snake_case_ ): _snake_case : List[str] = self.conv(__lowerCAmelCase ) return hidden_states class _UpperCAmelCase ( nn.Module): __lowercase : int = 4_2 __lowercase : Dict = None __lowercase : Tuple = 0.0 __lowercase : str = None __lowercase : List[str] = jnp.floataa def lowerCamelCase__ ( self ): _snake_case : Tuple = self.in_channels if self.out_channels is None else self.out_channels _snake_case : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _snake_case : str = nn.Conv( __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _snake_case : str = nn.Dense(__lowerCAmelCase , dtype=self.dtype ) _snake_case : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _snake_case : List[Any] = nn.Dropout(self.dropout_prob ) _snake_case : str = nn.Conv( __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _snake_case : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else 
self.use_nin_shortcut _snake_case : Dict = None if use_nin_shortcut: _snake_case : List[Any] = nn.Conv( __lowerCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , snake_case_ , snake_case_ , snake_case_=True ): _snake_case : List[Any] = hidden_states _snake_case : str = self.norma(__lowerCAmelCase ) _snake_case : List[Any] = nn.swish(__lowerCAmelCase ) _snake_case : Dict = self.conva(__lowerCAmelCase ) _snake_case : Optional[int] = self.time_emb_proj(nn.swish(__lowerCAmelCase ) ) _snake_case : Union[str, Any] = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase , 1 ) , 1 ) _snake_case : int = hidden_states + temb _snake_case : List[Any] = self.norma(__lowerCAmelCase ) _snake_case : str = nn.swish(__lowerCAmelCase ) _snake_case : int = self.dropout(__lowerCAmelCase , __lowerCAmelCase ) _snake_case : Optional[int] = self.conva(__lowerCAmelCase ) if self.conv_shortcut is not None: _snake_case : Optional[Any] = self.conv_shortcut(__lowerCAmelCase ) return hidden_states + residual
710
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', 
f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), 
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] 
_snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case , _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a ) 
_snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a ) 
model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : str = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Dict = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : List[Any] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Optional[int] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : List[Any] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , 
*snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Any = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Dict = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Union[str, Any] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : List[str] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Any = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , 
**snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Optional[Any] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Tuple = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) class _UpperCAmelCase ( metaclass=__UpperCAmelCase): __lowercase : Optional[int] = ["""flax"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] ) @classmethod def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ): requires_backends(cls , ["flax"] )
711
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
87
0
# SentencePiece-based tokenizer in the XLNet style (left-padded, sep/cls at
# the end of the sequence, segment ids ending with a CLS segment of 2).
#
# NOTE(review): machine-obfuscated and NOT runnable as-is:
#   * every `__init__` parameter is named `snake_case_` (duplicate parameter
#     names are a SyntaxError), and bodies reference `UpperCamelCase__`,
#     which is never defined — it presumably stood for the various original
#     argument names, now unrecoverable;
#   * the module-level constants (file names, pretrained maps, segment ids)
#     are all bound to `_a`, so each binding clobbers the previous one, yet
#     the class reads `VOCAB_FILES_NAMES` etc.;
#   * most methods are named `lowerCamelCase__` and results are dropped into
#     the local `_snake_case` instead of `self.<attr>`/named locals, while
#     later statements read the intended names (`vocab`, `outputs`, ...).
# Comments describe apparent intent only — TODO confirm against the
# pre-obfuscation source.
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging

_a : Tuple = logging.get_logger(__name__)
_a : Dict = {'vocab_file': 'spiece.model'}
_a : Dict = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
_a : Optional[Any] = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
_a : List[str] = 0
_a : Tuple = 1
_a : Dict = 2
_a : Optional[Any] = 3
_a : List[Any] = 4


class _UpperCAmelCase(UpperCamelCase_):
    # Collapsed class attributes; presumably vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes and padding_side.
    __lowercase : Any = VOCAB_FILES_NAMES
    __lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : List[Any] = """left"""

    def __init__(
        self,
        snake_case_,                          # presumably vocab_file
        snake_case_=False,                    # presumably do_lower_case
        snake_case_=True,                     # presumably remove_space
        snake_case_=False,                    # presumably keep_accents
        snake_case_="<s>",
        snake_case_="</s>",
        snake_case_="<unk>",
        snake_case_="<sep>",
        snake_case_="<pad>",
        snake_case_="<cls>",
        snake_case_="<mask>",
        snake_case_=["<eop>", "<eod>"],       # additional special tokens
        snake_case_ = None,                   # presumably sp_model_kwargs
        **snake_case_,
    ):
        # Wrap a plain-string mask token so leading whitespace is kept.
        _snake_case : Union[str, Any] = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__, UpperCamelCase__) else mask_token
        _snake_case : str = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__,
            remove_space=UpperCamelCase__,
            keep_accents=UpperCamelCase__,
            bos_token=UpperCamelCase__,
            eos_token=UpperCamelCase__,
            unk_token=UpperCamelCase__,
            sep_token=UpperCamelCase__,
            pad_token=UpperCamelCase__,
            cls_token=UpperCamelCase__,
            mask_token=UpperCamelCase__,
            additional_special_tokens=UpperCamelCase__,
            sp_model_kwargs=self.sp_model_kwargs,
            **UpperCamelCase__,
        )
        # Presumably the pad-token type id (XLNet uses 3) — TODO confirm.
        _snake_case : List[str] = 3
        _snake_case : Tuple = do_lower_case
        _snake_case : Dict = remove_space
        _snake_case : Optional[int] = keep_accents
        _snake_case : List[Any] = vocab_file
        # Load the SentencePiece model from disk.
        _snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(UpperCamelCase__)

    @property
    def lowerCamelCase__(self):
        # Vocabulary size comes straight from the SentencePiece model.
        return len(self.sp_model)

    def lowerCamelCase__(self):
        # token -> id map, including tokens added after loading.
        _snake_case : int = {self.convert_ids_to_tokens(UpperCamelCase__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from `vocab_file` in `__setstate__`.
        _snake_case : int = self.__dict__.copy()
        _snake_case : Tuple = None
        return state

    def __setstate__(self, snake_case_):
        _snake_case : Union[str, Any] = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            _snake_case : Optional[Any] = {}

        _snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def lowerCamelCase__(self, snake_case_):
        # Text cleanup before SentencePiece: whitespace collapse, quote
        # normalization, optional accent stripping and lower-casing.
        if self.remove_space:
            _snake_case : List[Any] = ''' '''.join(inputs.strip().split())
        else:
            _snake_case : List[str] = inputs
        _snake_case : List[str] = outputs.replace("``", "\"").replace("\'\'", "\"")
        if not self.keep_accents:
            _snake_case : Any = unicodedata.normalize("NFKD", UpperCamelCase__)
            _snake_case : List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase__)])
        if self.do_lower_case:
            _snake_case : Union[str, Any] = outputs.lower()
        return outputs

    def lowerCamelCase__(self, snake_case_):
        # Tokenize, with special handling for pieces ending in ",<digit>"
        # so the trailing comma is split off as its own piece.
        _snake_case : Optional[Any] = self.preprocess_text(UpperCamelCase__)
        _snake_case : Optional[Any] = self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__)
        _snake_case : Optional[int] = []
        for piece in pieces:
            if len(UpperCamelCase__) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                _snake_case : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious leading underline piece re-introduced
                    # by re-encoding the truncated piece.
                    if len(cur_pieces[0]) == 1:
                        _snake_case : Tuple = cur_pieces[1:]
                    else:
                        _snake_case : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(UpperCamelCase__)
            else:
                new_pieces.append(UpperCamelCase__)
        return new_pieces

    def lowerCamelCase__(self, snake_case_):
        # token (piece) -> id via SentencePiece.
        return self.sp_model.PieceToId(UpperCamelCase__)

    def lowerCamelCase__(self, snake_case_):
        # id -> token (piece) via SentencePiece.
        return self.sp_model.IdToPiece(UpperCamelCase__)

    def lowerCamelCase__(self, snake_case_):
        # Join pieces and turn SentencePiece underlines back into spaces.
        _snake_case : Any = ''''''.join(UpperCamelCase__).replace(UpperCamelCase__, " ").strip()
        return out_string

    def lowerCamelCase__(
        self,
        snake_case_,
        snake_case_ = False,      # presumably skip_special_tokens
        snake_case_ = None,       # presumably clean_up_tokenization_spaces
        snake_case_ = True,       # presumably spaces_between_special_tokens
        **snake_case_,
    ):
        # Decode ids to text, keeping added tokens out of the SentencePiece
        # detokenization pass.
        _snake_case : Optional[int] = kwargs.pop("use_source_tokenizer", UpperCamelCase__)

        _snake_case : Tuple = self.convert_ids_to_tokens(UpperCamelCase__, skip_special_tokens=UpperCamelCase__)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        _snake_case : Tuple = []
        _snake_case : Optional[int] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__))
                    _snake_case : List[str] = []
                sub_texts.append(UpperCamelCase__)
            else:
                current_sub_text.append(UpperCamelCase__)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        _snake_case : Any = ''''''.join(UpperCamelCase__)

        _snake_case : Optional[int] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            _snake_case : Dict = self.clean_up_tokenization(UpperCamelCase__)
            return clean_text
        else:
            return text

    def lowerCamelCase__(self, snake_case_, snake_case_ = None):
        # XLNet layout: tokens (+ pair) each followed by <sep>, ending in <cls>.
        _snake_case : int = [self.sep_token_id]
        _snake_case : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowerCamelCase__(self, snake_case_, snake_case_ = None, snake_case_ = False):
        # 1 marks special tokens (<sep>/<cls> at the end), 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__)

        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__)) + [1] + ([0] * len(UpperCamelCase__)) + [1, 1]
        return ([0] * len(UpperCamelCase__)) + [1, 1]

    def lowerCamelCase__(self, snake_case_, snake_case_ = None):
        # Segment ids: 0 for the first sequence (+sep), 1 for the second
        # (+sep), and segment id 2 for the trailing <cls> token.
        _snake_case : List[str] = [self.sep_token_id]
        _snake_case : Dict = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id

    def lowerCamelCase__(self, snake_case_, snake_case_ = None):
        # Save the SentencePiece model file into `save_directory`, copying
        # the original file when possible, else serializing the in-memory model.
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        _snake_case : List[Any] = os.path.join(
            UpperCamelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, UpperCamelCase__)
        elif not os.path.isfile(self.vocab_file):
            with open(UpperCamelCase__, "wb") as fi:
                _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__)

        return (out_vocab_file,)
712
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : Optional[int] = logging.get_logger(__name__) _a : List[str] = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """convnextv2""" def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) _snake_case : Tuple = num_channels _snake_case : Optional[int] = patch_size _snake_case : Tuple = num_stages _snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _snake_case : str = [3, 3, 9, 3] if depths is None else depths _snake_case : int = hidden_act _snake_case : Tuple = initializer_range _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Optional[int] = drop_path_rate _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
87
0
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : Dict = 10 def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = [1, 2, 3, 4] _snake_case : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _snake_case : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _snake_case : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." 
_snake_case , _snake_case : str = process_story(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , [] ) def lowerCamelCase__ ( self ): _snake_case : List[str] = "" _snake_case , _snake_case : List[Any] = process_story(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , [] ) self.assertEqual(_SCREAMING_SNAKE_CASE , [] ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _snake_case , _snake_case : Optional[int] = process_story(_SCREAMING_SNAKE_CASE ) _snake_case : Optional[Any] = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case : int = ["It was the best of times."] self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self ): _snake_case : List[str] = torch.tensor([1, 2, 3, 4] ) _snake_case : Any = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 0 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _snake_case : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 23 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self ): _snake_case : str = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _snake_case : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 1 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self ): _snake_case : Dict = 1_01 _snake_case : Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] ) _snake_case : int = 
torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _snake_case : Dict = compute_token_type_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) np.testing.assert_array_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
713
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def a__ ( a : Namespace ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _a : int = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _UpperCAmelCase ( _snake_case): @staticmethod def lowerCamelCase__ ( snake_case_ ): _snake_case : Dict = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=snake_case_ ) def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ): _snake_case : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F'Loading model {model_type}' ) _snake_case : Optional[int] = model_type _snake_case : Any = tf_checkpoint _snake_case : Optional[int] = pytorch_dump_output _snake_case : Tuple = config _snake_case : Tuple = finetuning_task_name def lowerCamelCase__ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: 
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) if "ckpt" in self._tf_checkpoint.lower(): _snake_case : int = self._tf_checkpoint _snake_case : Optional[Any] = "" else: _snake_case : Optional[int] = self._tf_checkpoint _snake_case : List[str] = "" convert_transfo_xl_checkpoint_to_pytorch( snake_case_ , self._config , self._pytorch_dump_output , snake_case_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
87
0
from manim import *


# BUGFIX: the base was the undefined name `UpperCAmelCase__`; with
# `from manim import *` in scope, `Scene` is the intended base class.
class _UpperCAmelCase(Scene):
    """Animation: an empty model skeleton is loaded into CPU memory."""

    # NOTE(review): manim renders via `Scene.construct`; this method likely
    # needs to be named `construct` to run -- kept as-is to preserve the
    # visible interface.  All undefined `snake_case_` references below are
    # restored to the manim constants/objects the surrounding calls imply.
    def lowerCamelCase__(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two columns of six memory cells plus a label.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single memory cell plus a label, aligned with the CPU.
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        # Model: a row of six cells plus a label.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_a = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        # Shrink each model cell into a small yellow marker packed into the CPU.
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
714
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a__ ( a : List[str] , a : Any ): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Any = flax_key_tuple[:-1] + ("weight",) _snake_case : str = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",) _snake_case : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ): """simple docstring""" if "metadata" in layer: _snake_case : Optional[int] = layer.split("metadata" ) _snake_case : Optional[int] = "".join(split_layer[0] )[:-1] _snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: _snake_case : Any = layer.split("kvstore" ) _snake_case : str = "".join(split_layer[0] )[:-1] _snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: _snake_case : List[Any] = layer.split("/" ) _snake_case : Tuple = "/".join(split_layer[:-1] ) _snake_case : int = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: _snake_case : Tuple = "file" else: _snake_case : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, 
content def a__ ( a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = rename_keys(a ) _snake_case : int = {} for k, v in current_block.items(): _snake_case : Optional[int] = v _snake_case : Optional[int] = new_current_block torch.save(a , a ) def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ): """simple docstring""" _snake_case : Any = convert_file_size_to_int(a ) _snake_case : Tuple = [] _snake_case : Optional[int] = {} _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: _snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] _snake_case : Optional[Any] = flatten_dict(a , sep="/" ) _snake_case : Optional[Any] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: _snake_case : Dict = content else: _snake_case : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Dict = torch.tensor(a ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) _snake_case : Optional[Any] = "/".join(a ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _snake_case : Any = os.path.join( a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : List[Any] = {} _snake_case : str = 0 _snake_case : List[str] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : str = {} _snake_case : Any = {} for idx, shard in enumerate(a ): _snake_case : Optional[int] = weights_name.replace( ".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(a , os.path.join(a , a ) ) _snake_case : Dict = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {"total_size": total_size} _snake_case : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: _snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) _a : Optional[int] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a__ ( ): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) _snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) _snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" ) _snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." _snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids _snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
87
0
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class _UpperCAmelCase ( unittest.TestCase): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ): _snake_case : Optional[int] = parent _snake_case : int = batch_size _snake_case : Optional[int] = seq_length _snake_case : str = is_training _snake_case : List[Any] = use_attention_mask _snake_case : Tuple = use_token_type_ids _snake_case : str = use_labels _snake_case : Any = vocab_size _snake_case : Union[str, Any] = hidden_size _snake_case : List[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Tuple = hidden_act _snake_case : Any = hidden_dropout_prob _snake_case : Any = attention_probs_dropout_prob _snake_case : Dict = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Tuple = type_sequence_label_size _snake_case : List[str] = initializer_range _snake_case : Dict = num_choices def lowerCamelCase__ ( self ): _snake_case : Any = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Tuple = None if self.use_attention_mask: _snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : int = None if self.use_token_type_ids: _snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : List[str] = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase__ ( self ): _snake_case : str = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs _snake_case : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : List[str] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = config_and_inputs _snake_case : Optional[Any] = True _snake_case : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class _UpperCAmelCase 
( lowercase__ , unittest.TestCase): __lowercase : int = True __lowercase : Union[str, Any] = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self ): _snake_case : Any = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowerCamelCase__ ( self ): for model_class_name in self.all_model_classes: _snake_case : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowercase ) _snake_case : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowercase ) @require_flax class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowercase ) _snake_case : Any = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) _snake_case : int = model(__lowercase )[0] _snake_case : Optional[int] = [1, 11, 5_02_65] self.assertEqual(list(output.shape ) , __lowercase ) # compare the actual values for a slice. _snake_case : Tuple = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self ): _snake_case : Dict = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__lowercase ) _snake_case : int = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) _snake_case : Optional[Any] = model(__lowercase )[0] # compare the actual values for a slice. 
_snake_case : Tuple = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=1E-4 ) )
715
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ): _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
return inputs_dict class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _snake_case : Optional[Any] = parent _snake_case : List[Any] = batch_size _snake_case : Optional[int] = seq_length _snake_case : Dict = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : int = use_labels _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : List[Any] = num_labels _snake_case : Optional[int] = num_choices _snake_case : Optional[int] = scope _snake_case : Any = embedding_size def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Optional[Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : Dict = None _snake_case : Tuple = None _snake_case : str = None if self.use_labels: _snake_case : Union[str, Any] = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = TFMobileBertModel(config=snake_case_ ) _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) _snake_case : Union[str, Any] = [input_ids, input_mask] _snake_case : Optional[Any] = model(snake_case_ ) _snake_case : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[str] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Tuple = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = self.num_labels _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Any = self.num_choices _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : 
Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _snake_case : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.num_labels _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ ) _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ ) _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , 
config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase__ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" 
) _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : Union[str, Any] = model(snake_case_ )[0] _snake_case : int = [1, 6, 3_05_22] self.assertEqual(output.shape , snake_case_ ) _snake_case : Optional[Any] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
87
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : str = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( __snake_case): __lowercase : List[str] = 'openai-gpt' __lowercase : Any = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Tuple = n_positions _snake_case : Tuple = n_embd _snake_case : Tuple = n_layer _snake_case : Union[str, Any] = n_head _snake_case : int = afn _snake_case : Dict = resid_pdrop _snake_case : List[Any] = embd_pdrop _snake_case : Tuple = attn_pdrop _snake_case : Tuple = layer_norm_epsilon _snake_case : Any = initializer_range _snake_case : Dict = summary_type _snake_case : Optional[Any] = summary_use_proj _snake_case : int = summary_activation _snake_case : Any = summary_first_dropout _snake_case : Union[str, Any] = summary_proj_to_labels super().__init__(**A_ )
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _a : List[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def a__ ( a : str ): """simple docstring""" _snake_case : Optional[Any] = os.path.join(args.tf_model_dir , "parameters.json" ) _snake_case : Union[str, Any] = json.loads(open(a ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith(".pt" ): _snake_case : List[str] = args.output + ".pt" _snake_case : Tuple = OrderedDict() with tf.device("/CPU:0" ): _snake_case : int = tf.train.load_checkpoint(args.tf_model_dir ) _snake_case : int = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _snake_case : Tuple = reader.get_tensor(a ).astype(np.floataa ) if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ): continue if key_name.startswith("pasts/" ): if key_name.startswith("pasts/mlp" ): _snake_case : Any = int(key_name[9] ) elif key_name.startswith("pasts/out" ): _snake_case : List[Any] = 8 _snake_case : Optional[Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _snake_case : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : List[str] = torch.tensor(a ) elif key_name.startswith("model/moe" ): _snake_case : List[str] = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/switch_gating/kernel" ): _snake_case : Optional[Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player _snake_case : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : List[str] = torch.tensor(a ) elif key_name.endswith("/softmlp/kernel" ): _snake_case : Tuple = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player _snake_case : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : List[str] = 
torch.tensor(a ) elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ): _snake_case : Any = key_name[-9:-7] for i in range(16 ): _snake_case : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) _snake_case : List[str] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _snake_case : List[Any] = torch.tensor(a ) elif key_name.startswith("model/mlp" ): _snake_case : Tuple = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/p1/kernel" ): _snake_case : Tuple = "model.blocks.%d.feed_forward.mlp.wi.weight" % player _snake_case : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : str = torch.tensor(a ) elif key_name.endswith("/p1/bias" ): _snake_case : Optional[Any] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player _snake_case : Optional[Any] = vnp.copy() # same because it is one dimensional _snake_case : Any = torch.tensor(a ) elif key_name.endswith("/p2/kernel" ): _snake_case : List[str] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player _snake_case : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : Tuple = torch.tensor(a ) elif key_name.endswith("/p2/bias" ): _snake_case : Dict = "model.blocks.%d.feed_forward.mlp.wo.bias" % player _snake_case : List[str] = vnp.copy() # same because it is one dimensional _snake_case : Any = torch.tensor(a ) elif key_name.startswith("model/ln" ): _snake_case : List[str] = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _snake_case : Any = "model.blocks.%d.feed_forward.norm.bias" % player _snake_case : List[str] = vnp.copy() # same because it is one dimensional _snake_case : Tuple = torch.tensor(a ) elif key_name.endswith("/g" ): _snake_case : int = "model.blocks.%d.feed_forward.norm.weight" % player _snake_case : Tuple = vnp.copy() # same because it is one dimensional _snake_case : Dict = torch.tensor(a ) elif 
key_name.startswith("model/att" ): _snake_case : Optional[int] = int(key_name[9:].split("/" )[0] ) if key_name.endswith("/qkv/kernel" ): _snake_case : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _snake_case : Union[str, Any] = state[:, 0, :, :] _snake_case : Dict = state[:, 1, :, :] _snake_case : List[Any] = state[:, 2, :, :] _snake_case : Tuple = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _snake_case : str = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _snake_case : Tuple = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _snake_case : str = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player _snake_case : List[str] = torch.tensor(a ) _snake_case : List[Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player _snake_case : Tuple = torch.tensor(a ) _snake_case : List[str] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player _snake_case : Any = torch.tensor(a ) elif key_name.endswith("/o/kernel" ): _snake_case : Optional[Any] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player _snake_case : Dict = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _snake_case : Tuple = torch.tensor(a ) elif key_name.startswith("model/an" ): _snake_case : Tuple = int(key_name[8:].split("/" )[0] ) if key_name.endswith("/b" ): _snake_case : Tuple = "model.blocks.%d.self_attn.norm.bias" % player _snake_case : List[str] = vnp.copy() # same because it is one dimensional _snake_case : str = torch.tensor(a ) elif key_name.endswith("/g" ): _snake_case : Union[str, Any] = "model.blocks.%d.self_attn.norm.weight" % player _snake_case : Union[str, 
Any] = vnp.copy() # same because it is one dimensional _snake_case : Optional[Any] = torch.tensor(a ) elif ( key_name.startswith("model/wte" ) or key_name.startswith("model/wpe" ) or key_name.startswith("model/ete" ) ): _snake_case : Optional[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] _snake_case : List[str] = "model.%s.weight" % nlayer _snake_case : List[str] = vnp.copy() # same in embedded _snake_case : Any = torch.tensor(a ) if key_name.startswith("model/wte" ): _snake_case : Union[str, Any] = "lm_head.weight" _snake_case : Optional[Any] = vnp.copy() # same in embedded _snake_case : Dict = torch.tensor(a ) elif key_name.startswith("model/wob" ): _snake_case : List[Any] = "final_logits_bias" _snake_case : Dict = vnp.copy() # same in embedded _snake_case : Union[str, Any] = state.reshape((1, -1) ) _snake_case : int = torch.tensor(a ) elif key_name == "model/dense/kernel": _snake_case : Optional[Any] = "model.last_project.weight" _snake_case : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _snake_case : List[Any] = torch.tensor(a ) elif key_name == "model/dense_1/bias": _snake_case : int = "model.last_project.bias" _snake_case : Any = vnp.copy() # same because it is one dimensional _snake_case : Optional[int] = torch.tensor(a ) torch.save(a , args.output ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") _a : str = parser.parse_args() convert_tf_gptsan_to_pt(args)
717
"""simple docstring""" def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ): """simple docstring""" _snake_case : Optional[int] = right or len(a ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(a , a , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" from collections import Counter from timeit import timeit def a__ ( a : Optional[Any] = "" , ): """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def a__ ( a : Optional[Any] = "" ): """simple docstring""" if len(__lowerCAmelCase ) == 0: return True _snake_case : Tuple = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string _snake_case : dict[str, int] = {} for character in lower_case_input_str: _snake_case : int = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1 _snake_case : Dict = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def a__ ( a : List[str] = "" ): """simple docstring""" print("\nFor string = " , __lowerCAmelCase , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": _a : Any = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) _a : List[str] = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
718
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_ ): _snake_case , _snake_case : Dict = text, pattern _snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self ): # searches pattern in text and returns index positions _snake_case : List[str] = [] for i in range(self.textLen - self.patLen + 1 ): _snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ ) if mismatch_index == -1: positions.append(snake_case_ ) else: _snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] ) _snake_case : Tuple = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _a : List[Any] = """ABAABA""" _a : str = """AB""" _a : List[Any] = BoyerMooreSearch(text, pattern) _a : Any = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
87
0
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ["a", "b", "c"] # Defaults to last layer if both are None _snake_case , _snake_case : Optional[Any] = get_aligned_output_features_output_indices(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , ["c"] ) self.assertEqual(lowerCAmelCase_ , [2] ) # Out indices set to match out features _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices(["a", "c"] , lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , ["a", "c"] ) self.assertEqual(lowerCAmelCase_ , [0, 2] ) # Out features set to match out indices _snake_case , _snake_case : int = get_aligned_output_features_output_indices(lowerCAmelCase_ , [0, 2] , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , ["a", "c"] ) self.assertEqual(lowerCAmelCase_ , [0, 2] ) # Out features selected from negative indices _snake_case , _snake_case : int = get_aligned_output_features_output_indices(lowerCAmelCase_ , [-3, -1] , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , ["a", "c"] ) self.assertEqual(lowerCAmelCase_ , [-3, -1] ) def lowerCamelCase__ ( self ): with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , lowerCAmelCase_ ) # Out features must be a list with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(lowerCAmelCase_ , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with 
self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(lowerCAmelCase_ , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(lowerCAmelCase_ ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def lowerCamelCase__ ( self ): _snake_case : Dict = BackboneMixin() _snake_case : Optional[Any] = ["a", "b", "c"] _snake_case : Optional[Any] = ["a", "c"] _snake_case : Union[str, Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly _snake_case : List[str] = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) _snake_case : List[Any] = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
719
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": _a : Dict = input("""Enter image url: """).strip() print(f'Downloading image from {url} ...') _a : str = BeautifulSoup(requests.get(url).content, """html.parser""") # The image URL is in the content field of the first meta tag with property og:image _a : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""] _a : Dict = requests.get(image_url).content _a : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg' with open(file_name, """wb""") as fp: fp.write(image_data) print(f'Done. Image saved to disk as {file_name}.')
87
0
"""simple docstring""" import string from math import logaa def a__ ( a : str , a : str ): """simple docstring""" _snake_case : str = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) _snake_case : int = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def a__ ( a : str , a : str ): """simple docstring""" _snake_case : int = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' _snake_case : Optional[Any] = corpus_without_punctuation.split("\n" ) _snake_case : Any = term.lower() return (len([doc for doc in docs if term in doc] ), len(UpperCamelCase__ )) def a__ ( a : int , a : int , a : Any=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def a__ ( a : int , a : int ): """simple docstring""" return round(tf * idf , 3 )
720
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : Optional[int] = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" _a : Union[str, Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" _a : str = [{"type": "code", "content": INSTALL_CONTENT}] _a : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
721
"""simple docstring""" import argparse import json import subprocess def a__ ( a : Optional[Any] , a : Optional[int] ): """simple docstring""" _snake_case : str = [] _snake_case : Optional[Any] = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" ) _snake_case : Dict = subprocess.run(a , shell=a , stdout=subprocess.PIPE ) _snake_case : Tuple = output.stdout.decode("utf-8" ) _snake_case : List[str] = json.loads(a ) _snake_case : Any = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(a ) # save the result so we can report them on Slack with open("offline_runners.txt" , "w" ) as fp: fp.write(json.dumps(a ) ) if len(a ) > 0: _snake_case : Any = "\n".join([x["name"] for x in offline_runners] ) raise ValueError(f'The following runners are offline:\n{failed}' ) if __name__ == "__main__": def a__ ( a : Optional[int] ): """simple docstring""" return values.split("," ) _a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) _a : List[str] = parser.parse_args() get_runner_status(args.target_runners, args.token)
87
0
"""Auto-close / mark-stale bot for huggingface/accelerate issues (restored).

The mangled original named the function ``a__`` while ``__main__`` called
``main``, assigned the exempt-label list to ``_a`` while the body read
``LABELS_TO_EXEMPT``, and broke the sort key (``lambda a: i.created_at``,
``reverse=a``).
"""
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    """Walk all open issues and close or nag the inactive ones."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
700
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _snake_case : List[Any] = Vector() def lowerCamelCase__ ( self ): _snake_case : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 2, 3, 4] ) self.assertEqual(len(snake_case_ ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2] ) _snake_case : List[str] = Vector([1, 2, 3, 4, 5] ) _snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) _snake_case : Any = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : str = Vector([1, 2, 3] ) _snake_case : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Vector([1, 2, 3] ) _snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product _snake_case : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase__ ( self ): self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 ) def lowerCamelCase__ ( self ): 
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Vector([1, 2, 3] ) _snake_case : Optional[Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] ) _snake_case : Optional[int] = x.copy() self.assertEqual(str(snake_case_ ) , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(snake_case_ ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _snake_case : List[str] = Vector([1, 2, 3] ) self.assertEqual("(14,32,50)" , str(a * x ) ) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , 
str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) ) def lowerCamelCase__ ( self ): self.assertEqual( "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
87
0
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Maps checkpoint ids to their maximum supported input length (in codepoints).
# NOTE(review): the original scrambled file bound every constant below to `_a`,
# leaving the names referenced by the tokenizer class unbound; restored here.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2_048,
}

# Unicode defines 1,114,112 total "codepoints".
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class _UpperCAmelCase(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of
    characters and maps each character to its Unicode codepoint, so the "vocabulary" is the full
    Unicode codepoint space plus a handful of Private-Use special symbols.

    NOTE(review): the scrambled original declared six `__init__` parameters all named
    `snake_case_` (a SyntaxError) with self-referential defaults `chr(snake_case_)`, and gave
    every method the same name `lowerCamelCase__` so only the last definition survived. The
    signature defaults and distinct method names below are restored from the special-codepoint
    constants and the tokenizer hook names the body itself references.
    """

    # Maximum model input sizes for known pretrained checkpoints.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2_048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """Size of the codepoint "vocabulary" (all of Unicode)."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize by splitting the string into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A token's id is simply its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to a character, with special handling for the pseudo-symbols."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        """Characters concatenate directly back into the original string."""
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens, matching build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """CANINE has no vocabulary file to save; nothing is written."""
        return ()
701
"""simple docstring"""
from __future__ import annotations

from collections import namedtuple


def a__(voltage: float, current: float, power: float):
    """Solve the electrical power relation ``P = V * I`` for the missing quantity.

    Exactly one of the three arguments must be 0; that argument is treated as the
    unknown and computed from the other two.

    NOTE(review): the original signature was ``def a__(a, a, a)`` — three parameters
    with the same name, a SyntaxError — while the body read ``voltage``, ``current``
    and ``power``; the parameter names are restored to match the body.

    Args:
        voltage: Voltage in volts (0 if unknown).
        current: Current in amperes (0 if unknown).
        power: Power in watts (0 if unknown); must not be negative.

    Returns:
        A ``result`` namedtuple ``(name, value)`` naming the solved quantity.

    Raises:
        ValueError: If not exactly one argument is 0, or if ``power`` is negative.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # abs() so that negative V*I still reports a non-negative magnitude.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
0
"""simple docstring"""
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


_a = logging.get_logger(__name__)  # pylint: disable=invalid-name
logger = _a


class _UpperCAmelCase(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a 1-D UNet and a scheduler.

    NOTE(review): the scrambled original subclassed the unbound name ``__snake_case``,
    declared five ``__call__`` parameters all named ``snake_case_`` (a SyntaxError), and
    assigned every local to ``_snake_case`` while reading real names such as
    ``down_scale_factor`` and ``audio``. Names are restored to match those references.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Run the denoising loop and return generated audio.

        Args:
            batch_size: Number of audio samples to generate.
            num_inference_steps: Number of denoising steps.
            generator: A ``torch.Generator`` (or list of them, one per batch element).
            audio_length_in_s: Requested audio length in seconds; defaults to the
                UNet's native ``sample_size / sample_rate``.
            return_dict: If False, return a plain ``(audio,)`` tuple.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the resolution once per up-block; the sample length must
        # leave at least a few elements at the coarsest level.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the downscale factor; the surplus is
            # trimmed back to `original_sample_size` after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        # Drop the padding added to reach a multiple of the downscale factor.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _a : Dict = logging.get_logger(__name__) _a : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } _a : Optional[int] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def a__ ( a : Optional[Any] ): """simple docstring""" _snake_case : Tuple = {} with open(lowerCAmelCase_ , "r" ) as file: for line_number, line in enumerate(lowerCAmelCase_ ): _snake_case : Any = line.strip() if line: _snake_case : List[Any] = line.split() _snake_case : List[Any] = line_number _snake_case : Optional[int] = words[0] _snake_case : int = value return result def a__ ( a : 
int , a : str , a : int , a : Tuple , a : Optional[Any] ): """simple docstring""" for attribute in key.split("." ): _snake_case : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCAmelCase_ ): _snake_case : Optional[int] = PARAM_MAPPING[full_name.split("." )[-1]] _snake_case : Dict = '''param''' if weight_type is not None and weight_type != "param": _snake_case : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape elif weight_type is not None and weight_type == "param": _snake_case : int = hf_pointer for attribute in hf_param_name.split("." ): _snake_case : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[int] = shape_pointer.shape # let's reduce dimension _snake_case : Optional[int] = value[0] else: _snake_case : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _snake_case : Any = value elif weight_type == "weight_g": _snake_case : Dict = value elif weight_type == "weight_v": _snake_case : Dict = value elif weight_type == "bias": _snake_case : Optional[Any] = value elif weight_type == "param": for attribute in hf_param_name.split("." ): _snake_case : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[Any] = value else: _snake_case : Tuple = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def a__ ( a : Dict , a : Any , a : List[Any] , a : Tuple , a : Any ): """simple docstring""" _snake_case : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCAmelCase_ ): _snake_case : int = PARAM_MAPPING[full_name.split("." 
)[-1]] _snake_case : Dict = '''param''' if weight_type is not None and weight_type != "param": _snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": _snake_case : int = '''.'''.join([key, hf_param_name] ) else: _snake_case : List[str] = key _snake_case : Optional[int] = value if '''lm_head''' in full_key else value[0] _a : Optional[int] = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def a__ ( a : Any , a : Tuple , a : str=None , a : List[str]=None ): """simple docstring""" _snake_case : Optional[Any] = False for key, mapped_key in MAPPING.items(): _snake_case : List[Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Any = name.split(lowerCAmelCase_ )[0].split("." )[-2] _snake_case : str = mapped_key.replace("*" , lowerCAmelCase_ ) if "weight_g" in name: _snake_case : str = '''weight_g''' elif "weight_v" in name: _snake_case : List[Any] = '''weight_v''' elif "bias" in name: _snake_case : List[Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _snake_case : int = '''weight''' else: _snake_case : Optional[int] = None if hf_dict is not None: rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return is_used return is_used def a__ ( a : int , a : Any , a : int ): """simple docstring""" _snake_case : str = [] _snake_case : Optional[int] = fairseq_model.state_dict() _snake_case : Optional[int] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): _snake_case : Dict = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == "group" , ) _snake_case : Optional[int] = True else: _snake_case : Optional[Any] = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(f'Unused weights: {unused_weights}' ) def a__ ( a : Union[str, Any] , a : Optional[Any] , a : Any , a : Dict , a : Optional[int] ): """simple docstring""" _snake_case : List[str] = full_name.split("conv_layers." )[-1] _snake_case : Tuple = name.split("." ) _snake_case : int = int(items[0] ) _snake_case : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _snake_case : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _snake_case : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _snake_case : int = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _snake_case : List[str] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def a__ ( a : List[Any] , a : Any , a : Optional[int]=None , a : Tuple=None , a : List[Any]=True , a : Union[str, Any]=False ): """simple docstring""" if config_path is not None: _snake_case : Dict = WavaVecaConfig.from_pretrained(lowerCAmelCase_ ) else: _snake_case : Tuple = WavaVecaConfig() if is_seq_class: _snake_case : List[str] = read_txt_into_dict(lowerCAmelCase_ ) _snake_case : Optional[Any] = idalabel _snake_case : Dict = WavaVecaForSequenceClassification(lowerCAmelCase_ ) _snake_case : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) feature_extractor.save_pretrained(lowerCAmelCase_ ) elif is_finetuned: if dict_path: _snake_case : int = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case : Optional[int] = target_dict.pad_index _snake_case : Tuple = target_dict.bos_index _snake_case : Dict = target_dict.eos_index _snake_case : Dict = len(target_dict.symbols ) _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , "vocab.json" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched _snake_case : Any = 0 _snake_case : List[str] = 1 with open(lowerCAmelCase_ , "w" , 
encoding="utf-8" ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : str = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCAmelCase_ , ) _snake_case : Any = True if config.feat_extract_norm == '''layer''' else False _snake_case : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) _snake_case : Any = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) _snake_case : List[Any] = WavaVecaForCTC(lowerCAmelCase_ ) else: _snake_case : str = WavaVecaForPreTraining(lowerCAmelCase_ ) if is_finetuned or is_seq_class: _snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _snake_case : Tuple = argparse.Namespace(task="audio_pretraining" ) _snake_case : str = fairseq.tasks.setup_task(lowerCAmelCase_ ) _snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) _snake_case : Any = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": _a : str = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to 
convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) _a : Optional[int] = parser.parse_args() _a : Optional[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
703
"""simple docstring"""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class _UpperCAmelCase(ProcessorMixin):
    """Wraps an Encodec feature extractor and a T5 tokenizer into a single processor.

    NOTE(review): the scrambled original named both class attributes ``__lowercase``
    and every method ``lowerCamelCase__`` (so only the last definition survived), while
    method bodies read real names (``audio``, ``inputs``, ``padding_mask``, ...).
    Attribute and method names are restored to match those references.
    """

    # Names of the component classes ProcessorMixin instantiates/validates.
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Active sub-processor; swapped when entering a target-text context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Delegate decoder prompt construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Encode `audio` and/or `text` inputs; merges both when given together."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as audio for compatibility.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features into the tokenized text batch.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode audio values (stripping padding) or fall back to tokenizer batch decode."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List["np.ndarray"]:
        """Strip padding from generated audio using `padding_mask`; returns a list of arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
87
0
import argparse
import json

from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification


def get_swinva_config(swinva_name):
    """Build a SwinvaConfig from a timm swinv2 model name.

    The timm name encodes size/window/resolution, e.g.
    ``swinv2_tiny_patch4_window8_256`` or ``swinv2_base_patch4_window12to16_192to256_22kto1k_ft``.
    """
    config = SwinvaConfig()
    name_split = swinva_name.split("_")

    model_size = name_split[1]
    # Image size: "AtoB" names are fine-tuned to the last 3 digits, otherwise the field is the size.
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    # Window size: "windowAtoB" takes the last 2 digits, otherwise strip the "window" prefix.
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # "large"
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    # Label mapping: 22k checkpoints that were NOT fine-tuned to 1k keep the 21841-class head.
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm swinv2 state-dict key to its HF Swinv2 equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename timm keys to HF keys, splitting fused qkv weights into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Attention masks are buffers recomputed by the HF model; drop them.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """Convert a timm swinv2 checkpoint to HF format, verify the logits match, and save/push it."""
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # Sanity check: both models must agree on the same image before we publish anything.
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["""YolosFeatureExtractor"""] _a : List[Any] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _a : List[str] = logging.get_logger(__name__) def a__ ( a : str ): """simple docstring""" _snake_case : int = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) _snake_case : Tuple = MaskFormerConfig(backbone_config=a_ ) _snake_case : Tuple = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok _snake_case : Any = 847 _snake_case : List[Any] = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok _snake_case : Union[str, Any] = 150 _snake_case : str = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok _snake_case : List[Any] = 171 _snake_case : str = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO _snake_case : Optional[Any] = 133 _snake_case : Tuple = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok _snake_case : Optional[Any] = 19 _snake_case : List[Any] = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok _snake_case : str = 65 _snake_case : Optional[Any] = '''mapillary-vistas-id2label.json''' _snake_case : Tuple = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) ) _snake_case : Union[str, Any] = {int(a_ ): v for k, v in idalabel.items()} return config def a__ ( a : Optional[Any] ): """simple docstring""" _snake_case : Any = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) 
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', 
f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', 
f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) 
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", 
"model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') ) rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def a__ ( a : str , a : List[str] , a : List[Any] ): """simple docstring""" _snake_case : Optional[int] = dct.pop(a_ ) _snake_case : int = val def a__ ( a : Union[str, Any] , a : List[str] ): """simple docstring""" _snake_case : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _snake_case : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _snake_case : Union[str, Any] = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) _snake_case : Tuple = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Dict = in_proj_weight[:dim, :] _snake_case : str = in_proj_bias[: dim] _snake_case : Any = in_proj_weight[ dim : dim * 2, : ] _snake_case : List[Any] = in_proj_bias[ dim : dim * 2 ] _snake_case : Union[str, Any] = in_proj_weight[ -dim :, : ] _snake_case : Optional[int] = in_proj_bias[-dim :] # fmt: on def a__ ( a : Tuple , a : Optional[int] ): """simple docstring""" _snake_case : Dict = 
config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) _snake_case : List[str] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Any = in_proj_weight[: hidden_size, :] _snake_case : Optional[Any] = in_proj_bias[:config.hidden_size] _snake_case : Any = in_proj_weight[hidden_size : hidden_size * 2, :] _snake_case : Tuple = in_proj_bias[hidden_size : hidden_size * 2] _snake_case : Union[str, Any] = in_proj_weight[-hidden_size :, :] _snake_case : int = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _snake_case : Optional[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) _snake_case : List[str] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : List[Any] = in_proj_weight[: hidden_size, :] _snake_case : List[str] = in_proj_bias[:config.hidden_size] _snake_case : Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :] _snake_case : Any = in_proj_bias[hidden_size : hidden_size * 2] _snake_case : List[str] = in_proj_weight[-hidden_size :, :] _snake_case : str = in_proj_bias[-hidden_size :] # fmt: on def a__ ( ): """simple docstring""" _snake_case : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case : Union[str, Any] = Image.open(requests.get(a_ , stream=a_ ).raw ) return im @torch.no_grad() def a__ ( a : 
str , a : str , a : str , a : bool = False ): """simple docstring""" _snake_case : Dict = get_maskformer_config(a_ ) # load original state_dict with open(a_ , "rb" ) as f: _snake_case : str = pickle.load(a_ ) _snake_case : int = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _snake_case : Union[str, Any] = create_rename_keys(a_ ) for src, dest in rename_keys: rename_key(a_ , a_ , a_ ) read_in_swin_q_k_v(a_ , config.backbone_config ) read_in_decoder_q_k_v(a_ , a_ ) # update to torch tensors for key, value in state_dict.items(): _snake_case : Any = torch.from_numpy(a_ ) # load 🤗 model _snake_case : str = MaskFormerForInstanceSegmentation(a_ ) model.eval() for name, param in model.named_parameters(): print(a_ , param.shape ) _snake_case : str = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(a_ ) == 0, f'Unexpected keys: {unexpected_keys}' # verify results _snake_case : Dict = prepare_img() if "vistas" in model_name: _snake_case : Dict = 65 elif "cityscapes" in model_name: _snake_case : List[str] = 65_535 else: _snake_case : Optional[Any] = 255 _snake_case : str = True if '''ade''' in model_name else False _snake_case : Tuple = MaskFormerImageProcessor(ignore_index=a_ , reduce_labels=a_ ) _snake_case : int = image_processor(a_ , return_tensors="pt" ) _snake_case : List[str] = model(**a_ ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _snake_case : List[Any] = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(f'nielsr/{model_name}' ) image_processor.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": _a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Optional[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
705
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those 
are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
0
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase_ : str = logging.get_logger(__name__) class _UpperCAmelCase ( enum.Enum): __lowercase : int = 0 __lowercase : Optional[int] = 1 @add_end_docstrings(_snake_case) class _UpperCAmelCase ( _snake_case): __lowercase : List[Any] = "generated" def __init__( self , *snake_case_ , **snake_case_ ): super().__init__(*snake_case_ , **snake_case_ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ , ): _snake_case : Dict = {} if truncation is not None: _snake_case : Tuple = truncation _snake_case : Tuple = generate_kwargs _snake_case : Tuple = {} if return_tensors is not None and return_type is None: _snake_case : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: _snake_case : Dict = return_type if clean_up_tokenization_spaces is not None: _snake_case : Optional[int] = clean_up_tokenization_spaces if stop_sequence is not None: _snake_case : Tuple = self.tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) if len(snake_case_ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." 
) _snake_case : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): return True def lowerCamelCase__ ( self , *snake_case_ , snake_case_ ): _snake_case : Optional[Any] = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , snake_case_ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) _snake_case : Dict = ([prefix + arg for arg in args[0]],) _snake_case : List[str] = True elif isinstance(args[0] , snake_case_ ): _snake_case : Union[str, Any] = (prefix + args[0],) _snake_case : str = False else: raise ValueError( F' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' ) _snake_case : Any = self.tokenizer(*snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *snake_case_ , **snake_case_ ): _snake_case : int = super().__call__(*snake_case_ , **snake_case_ ) if ( isinstance(args[0] , snake_case_ ) and all(isinstance(snake_case_ , snake_case_ ) for el in args[0] ) and all(len(snake_case_ ) == 1 for res in result ) ): return [res[0] for res in result] return result def lowerCamelCase__ ( self , snake_case_ , snake_case_=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case_ ): _snake_case : Optional[Any] = self._parse_and_tokenize(snake_case_ , truncation=snake_case_ , **snake_case_ ) return inputs def lowerCamelCase__ ( self , snake_case_ , **snake_case_ ): if self.framework == "pt": _snake_case , _snake_case : Tuple = model_inputs["input_ids"].shape elif self.framework == "tf": _snake_case , _snake_case : List[str] = tf.shape(model_inputs["input_ids"] ).numpy() _snake_case : Tuple = 
generate_kwargs.get("min_length" , self.model.config.min_length ) _snake_case : Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(snake_case_ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) _snake_case : List[Any] = self.model.generate(**snake_case_ , **snake_case_ ) _snake_case : List[str] = output_ids.shape[0] if self.framework == "pt": _snake_case : List[Any] = output_ids.reshape(snake_case_ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": _snake_case : str = tf.reshape(snake_case_ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def lowerCamelCase__ ( self , snake_case_ , snake_case_=ReturnType.TEXT , snake_case_=False ): _snake_case : Any = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: _snake_case : Dict = {F'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: _snake_case : str = { F'{self.return_name}_text': self.tokenizer.decode( snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , ) } records.append(snake_case_ ) return records @add_end_docstrings(_snake_case) class _UpperCAmelCase ( _snake_case): __lowercase : List[str] = "summary" def __call__( self , *snake_case_ , **snake_case_ ): return super().__call__(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): if max_length < min_length: logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' "a summarization task, where outputs shorter than the input are typically wanted, you might " F'consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(_snake_case) class _UpperCAmelCase ( _snake_case): __lowercase : Any = "translation" def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): if input_length > 0.9 * max_length: logger.warning( F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' "increasing your max_length manually, e.g. translator(\'...\', max_length=400)" ) return True def lowerCamelCase__ ( self , *snake_case_ , snake_case_=TruncationStrategy.DO_NOT_TRUNCATE , snake_case_=None , snake_case_=None ): if getattr(self.tokenizer , "_build_translation_inputs" , snake_case_ ): return self.tokenizer._build_translation_inputs( *snake_case_ , return_tensors=self.framework , truncation=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ ) else: return super()._parse_and_tokenize(*snake_case_ , truncation=snake_case_ ) def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , **snake_case_ ): _snake_case , _snake_case , _snake_case : str = super()._sanitize_parameters(**snake_case_ ) if src_lang is not None: _snake_case : Tuple = src_lang if tgt_lang is not None: _snake_case : List[Any] = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. _snake_case : Any = kwargs.get("task" , self.task ) _snake_case : Dict = task.split("_" ) if task and len(snake_case_ ) == 4: # translation, XX, to YY _snake_case : List[str] = items[1] _snake_case : Optional[int] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *snake_case_ , **snake_case_ ): return super().__call__(*snake_case_ , **snake_case_ )
706
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Any = logging.get_logger(__name__) _a : Any = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class _UpperCAmelCase ( _UpperCamelCase): __lowercase : List[str] = "altclip_text_model" def __init__( self , snake_case_=25_00_02 , snake_case_=10_24 , snake_case_=24 , snake_case_=16 , snake_case_=40_96 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_14 , snake_case_=1 , snake_case_=0.02 , snake_case_=0.02 , snake_case_=1E-05 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , snake_case_=7_68 , **snake_case_ , ): super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) _snake_case : Union[str, Any] = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Tuple = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : int = hidden_act _snake_case : Any = intermediate_size _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : Dict = attention_probs_dropout_prob _snake_case : List[str] = max_position_embeddings _snake_case : List[str] = type_vocab_size _snake_case : Dict = initializer_range _snake_case : List[str] = initializer_factor _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Tuple = position_embedding_type _snake_case : Optional[Any] = use_cache _snake_case : Optional[int] = project_dim class _UpperCAmelCase ( _UpperCamelCase): __lowercase : List[str] = "altclip_vision_model" def __init__( self , snake_case_=7_68 , snake_case_=30_72 , snake_case_=5_12 , snake_case_=12 , snake_case_=12 , snake_case_=3 , snake_case_=2_24 , snake_case_=32 , snake_case_="quick_gelu" , snake_case_=1E-5 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1.0 , **snake_case_ , 
): super().__init__(**__a ) _snake_case : int = hidden_size _snake_case : Optional[int] = intermediate_size _snake_case : Any = projection_dim _snake_case : Tuple = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : List[Any] = num_channels _snake_case : Dict = patch_size _snake_case : Any = image_size _snake_case : List[str] = initializer_range _snake_case : List[str] = initializer_factor _snake_case : Optional[Any] = attention_dropout _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Dict = hidden_act @classmethod def lowerCamelCase__ ( cls , snake_case_ , **snake_case_ ): cls._set_token_in_kwargs(__a ) _snake_case : int = cls.get_config_dict(__a , **__a ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": _snake_case : Tuple = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__a , **__a ) class _UpperCAmelCase ( _UpperCamelCase): __lowercase : Any = "altclip" __lowercase : List[str] = True def __init__( self , snake_case_=None , snake_case_=None , snake_case_=7_68 , snake_case_=2.6592 , **snake_case_ ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). _snake_case : Tuple = kwargs.pop("text_config_dict" , __a ) _snake_case : Tuple = kwargs.pop("vision_config_dict" , __a ) super().__init__(**__a ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _snake_case : int = {} # This is the complete result when using `text_config_dict`. _snake_case : Any = AltCLIPTextConfig(**__a ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _snake_case : Optional[int] = ( F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' F'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: _snake_case : Union[str, Any] = ( F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' F'value `text_config["{key}"]` will be overriden.' ) logger.warning(__a ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: _snake_case : int = {} # This is the complete result when using `vision_config_dict`. _snake_case : List[Any] = AltCLIPVisionConfig(**__a ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _snake_case : Dict = { str(__a ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _snake_case : Optional[int] = ( F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' F'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: _snake_case : Optional[Any] = ( F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' F'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(__a ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: _snake_case : Optional[Any] = {} logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: _snake_case : Dict = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) _snake_case : Union[str, Any] = AltCLIPTextConfig(**__a ) _snake_case : Dict = AltCLIPVisionConfig(**__a ) _snake_case : Any = projection_dim _snake_case : Union[str, Any] = logit_scale_init_value _snake_case : Optional[Any] = 1.0 @classmethod def lowerCamelCase__ ( cls , snake_case_ , snake_case_ , **snake_case_ ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ ) _snake_case : List[str] = self.text_config.to_dict() _snake_case : int = self.vision_config.to_dict() _snake_case : Optional[int] = self.__class__.model_type return output
707
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
0
"""simple docstring""" from __future__ import annotations _a : str = [True] * 1_000_001 _a : Optional[Any] = 2 while i * i <= 1_000_000: if seive[i]: for j in range(i * i, 1_000_001, i): _a : Tuple = False i += 1 def a__ ( a : str ): """simple docstring""" return seive[n] def a__ ( a : Tuple ): """simple docstring""" return any(digit in "02468" for digit in str(__UpperCamelCase ) ) def a__ ( a : Any = 1_000_000 ): """simple docstring""" _snake_case : Any = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(__UpperCamelCase ) and not contains_an_even_digit(__UpperCamelCase ): _snake_case : int = str(__UpperCamelCase ) _snake_case : Union[str, Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(__UpperCamelCase ) )] if all(is_prime(__UpperCamelCase ) for i in list_nums ): result.append(__UpperCamelCase ) return result def a__ ( ): """simple docstring""" return len(find_circular_primes() ) if __name__ == "__main__": print(f'{len(find_circular_primes()) = }')
708
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
87
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _a : Optional[int] = logging.get_logger(__name__) def a__ ( a : str ): """simple docstring""" _snake_case : Any = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) _snake_case : str = MaskFormerConfig(backbone_config=a ) _snake_case : List[Any] = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok _snake_case : Any = 847 _snake_case : str = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok _snake_case : Optional[Any] = 150 _snake_case : Tuple = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok _snake_case : Optional[int] = 171 _snake_case : Tuple = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO _snake_case : int = 133 _snake_case : int = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok _snake_case : List[str] = 19 _snake_case : Optional[int] = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok _snake_case : Union[str, Any] = 65 _snake_case : Optional[Any] = "mapillary-vistas-id2label.json" _snake_case : Optional[Any] = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) ) _snake_case : List[str] = {int(a ): v for k, v in idalabel.items()} return config def a__ ( a : Dict ): """simple docstring""" _snake_case : Any = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", 
"model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', 
f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', 
f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) 
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", 
"model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') ) rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def a__ ( a : List[str] , a : int , a : Optional[int] ): """simple docstring""" _snake_case : Tuple = dct.pop(a ) _snake_case : int = val def a__ ( a : int , a : Optional[int] ): """simple docstring""" _snake_case : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _snake_case : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _snake_case : Union[str, Any] = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) _snake_case : Tuple = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : str = in_proj_weight[:dim, :] _snake_case : Tuple = in_proj_bias[: dim] _snake_case : str = in_proj_weight[ dim : dim * 2, : ] _snake_case : Optional[int] = in_proj_bias[ dim : dim * 2 ] _snake_case : Optional[int] = in_proj_weight[ -dim :, : ] _snake_case : List[Any] = in_proj_bias[-dim :] # fmt: on def a__ ( a : Optional[Any] , a : List[Any] ): """simple docstring""" _snake_case : Optional[int] = 
config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _snake_case : Dict = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) _snake_case : Tuple = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : str = in_proj_weight[: hidden_size, :] _snake_case : Optional[int] = in_proj_bias[:config.hidden_size] _snake_case : Tuple = in_proj_weight[hidden_size : hidden_size * 2, :] _snake_case : int = in_proj_bias[hidden_size : hidden_size * 2] _snake_case : Dict = in_proj_weight[-hidden_size :, :] _snake_case : Union[str, Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _snake_case : Any = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) _snake_case : List[str] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Any = in_proj_weight[: hidden_size, :] _snake_case : Dict = in_proj_bias[:config.hidden_size] _snake_case : str = in_proj_weight[hidden_size : hidden_size * 2, :] _snake_case : List[str] = in_proj_bias[hidden_size : hidden_size * 2] _snake_case : int = in_proj_weight[-hidden_size :, :] _snake_case : str = in_proj_bias[-hidden_size :] # fmt: on def a__ ( ): """simple docstring""" _snake_case : str = "http://images.cocodataset.org/val2017/000000039769.jpg" _snake_case : Tuple = Image.open(requests.get(a , stream=a ).raw ) return im @torch.no_grad() def a__ ( a : Dict , a : Any , a : Optional[Any] , a : Any = False ): 
"""simple docstring""" _snake_case : List[str] = get_maskformer_config(a ) # load original state_dict with open(a , "rb" ) as f: _snake_case : Optional[int] = pickle.load(a ) _snake_case : List[Any] = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _snake_case : int = create_rename_keys(a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_swin_q_k_v(a , config.backbone_config ) read_in_decoder_q_k_v(a , a ) # update to torch tensors for key, value in state_dict.items(): _snake_case : Optional[int] = torch.from_numpy(a ) # load 🤗 model _snake_case : Tuple = MaskFormerForInstanceSegmentation(a ) model.eval() for name, param in model.named_parameters(): print(a , param.shape ) _snake_case , _snake_case : Any = model.load_state_dict(a , strict=a ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(a ) == 0, f'Unexpected keys: {unexpected_keys}' # verify results _snake_case : str = prepare_img() if "vistas" in model_name: _snake_case : Dict = 65 elif "cityscapes" in model_name: _snake_case : int = 65_535 else: _snake_case : List[str] = 255 _snake_case : List[str] = True if "ade" in model_name else False _snake_case : Tuple = MaskFormerImageProcessor(ignore_index=a , reduce_labels=a ) _snake_case : Union[str, Any] = image_processor(a , return_tensors="pt" ) _snake_case : List[str] = model(**a ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _snake_case : str = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(f'nielsr/{model_name}' ) image_processor.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
709
"""OpenAI GPT model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_a = logging.get_logger(__name__)

# NOTE(review): this rebinds `_a` (previously the logger); kept to preserve the
# file's existing (garbled) naming — confirm nothing downstream needs the logger.
_a = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration class for an OpenAI GPT model.

    Defaults reproduce the original `openai-gpt` architecture. The original
    collapsed source reused one name for every __init__ parameter (invalid
    Python) and assigned both `model_type` and `attribute_map` to the same
    class attribute; the real names are restored from the attribute
    assignments below and the `PretrainedConfig` contract.
    """

    model_type = "openai-gpt"
    # Map the generic PretrainedConfig attribute names onto GPT's `n_*` names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
87
0
"""Deprecated feature-extractor alias for GLPN."""

import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


_a = logging.get_logger(__name__)


class _UpperCAmelCase(GLPNImageProcessor):
    """Deprecated alias of `GLPNImageProcessor`, kept for backward compatibility.

    Emits a `FutureWarning` on construction and otherwise behaves exactly like
    `GLPNImageProcessor`.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: the original passed the module logger (`_a`) as the warning
        # category, which makes `warnings.warn` raise TypeError ("category must
        # be a Warning subclass"). The correct category is FutureWarning.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
710
"""Convert Table Transformer checkpoints (DETR-style) to the Hugging Face format."""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Return an OrderedDict with timm backbone keys mapped to the HF naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split each fused attention `in_proj` matrix/bias into separate q/k/v projections.

    The original checkpoints store query/key/value as one 768x256 matrix
    (PyTorch MultiheadAttention layout); HF expects three 256x256 projections.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    """Resize `image` so its longest side is 800 (detection) or 1000 (structure)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    return image.resize((int(round(scale * width)), int(round(scale * height))))


def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original Table Transformer checkpoint weights to our
    Table Transformer structure, verify outputs on an example image, and
    optionally save/push the converted model.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        # bug fix: original passed the folder path as `exist_ok`
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
0
"""Differentiation of real functions using dual numbers (automatic differentiation)."""

from math import factorial


class Dual:
    """A dual number: a real part plus a list of epsilon coefficients.

    `Dual(real, k)` with an int `k` creates `k` epsilon coefficients, all 1;
    passing a list uses it directly as the coefficients.
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero epsilon coefficients stripped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # NOTE(review): padding the shorter coefficient list with 1s (not 0s)
        # reproduces the original algorithm — confirm this is intended.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Convolve the epsilon coefficients (Cauchy product), then add the
        # cross terms with each operand's real part.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        # Only non-negative integer powers are supported (repeated self-multiply).
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position`.

    `func` must be composed of operations Dual supports (+, -, *, /, **).
    Raises ValueError on invalid argument types.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th epsilon coefficient is f^(k)(position) / k!.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # bug fix: the original defined `a__(a)` whose body used undefined `y`
    # and then called the never-defined name `f`.
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
711
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
87
0
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string as 1-based alphabet positions ('a' -> 1)."""
    # bug fix: the original defined this as `a__` and referenced an undefined
    # name instead of the parameter; `main` calls it as `encode`.
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-based alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Read a line from stdin, then print its encoding and round-trip decoding."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
712
"""ConvNeXT V2 model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


_a = logging.get_logger(__name__)

# NOTE(review): rebinds `_a` (previously the logger); kept to preserve the
# file's existing naming — confirm nothing downstream needs the logger.
_a = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class _UpperCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXT V2 model / backbone.

    Defaults reproduce the `convnextv2-tiny` architecture. The original
    collapsed source reused one name for every __init__ parameter (invalid
    Python); the real names are restored from the attribute assignments.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # tiny-variant defaults when sizes/depths are not given
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
87
0
"""Rotate a square matrix by 90/180/270 degrees counterclockwise."""

from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2, row by row.

    A zero size falls back to 4; a negative size uses its absolute value
    (`abs(n) or 4` behavior).
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (i.e. 90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of `matrix` as a new list of lists."""
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return `matrix` with its rows in reverse order."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return `matrix` with each row reversed."""
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    # bug fix: the original defined every function as `a__` (shadowing each
    # other) and called `rotate_aaa` for both 180 and 270 degrees.
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
713
"""CLI command to convert original TensorFlow checkpoints to Transformers PyTorch checkpoints."""

from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory used by argparse to instantiate the command from parsed args."""
    # bug fix: the original returned the undefined name `ConvertCommand`
    # while the class itself was named `_UpperCAmelCase`.
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its arguments on `parser`."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # bug fix: the original pointed `func` at the parser argument instead
        # of the factory.
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the model-type-specific TF -> PyTorch conversion routine.

        Conversion helpers are imported lazily because most require TensorFlow.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            # A .ckpt path is a TF checkpoint; anything else is treated as a
            # dataset file for the tokenizer/corpus conversion.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
87
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


# Batch-size constants; presumably MAX_GPU_BATCH_SIZE = 16 and
# EVAL_BATCH_SIZE = 32 before obfuscation — TODO confirm upstream.
_a : str = 16
_a : Union[str, Any] = 32


def a__ ( a : Union[str, Any] , a : Optional[int] = 16 ):
    """Build the GLUE/MRPC train and validation dataloaders.

    NOTE(review): both parameters are obfuscated to ``a`` (duplicate
    parameter names — not valid Python) and the body reads ``tokenizer``,
    ``datasets``, ``accelerator``, ``tokenized_datasets`` etc. that are
    never bound locally: the identifier obfuscation severed the name
    bindings. Verify against the original accelerate example script.
    """
    _snake_case : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
    _snake_case : Dict = load_dataset("glue" , "mrpc" )

    def tokenize_function(a : Union[str, Any] ):
        # max_length=None => use the model max length (it's actually the default)
        _snake_case : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a__ , max_length=a__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        _snake_case : Optional[int] = datasets.map(
            a__ , batched=a__ , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    _snake_case : Tuple = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(a : str ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        _snake_case : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            _snake_case : int = 16
        elif accelerator.mixed_precision != "no":
            _snake_case : str = 8
        else:
            _snake_case : Optional[int] = None

        return tokenizer.pad(
            a__ , padding="longest" , max_length=a__ , pad_to_multiple_of=a__ , return_tensors="pt" , )

    # Instantiate dataloaders.
    _snake_case : Union[str, Any] = DataLoader(
        tokenized_datasets["train"] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
    _snake_case : int = DataLoader(
        tokenized_datasets["validation"] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    _a : int = mocked_dataloaders  # noqa: F811


def a__ ( a : str , a : Any ):
    """Train and evaluate BERT-base on MRPC under ``accelerate``.

    Runs the training loop with optional gradient accumulation, then
    evaluates per epoch, truncating the final distributed eval batch so
    duplicated samples are not counted twice (the # New Code # sections).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , a__ ) == "1":
        _snake_case : Union[str, Any] = 2
    # Initialize accelerator
    _snake_case : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _snake_case : Dict = config["lr"]
    _snake_case : str = int(config["num_epochs"] )
    _snake_case : Optional[int] = int(config["seed"] )
    _snake_case : Optional[int] = int(config["batch_size"] )
    _snake_case : Tuple = evaluate.load("glue" , "mrpc" )

    # If the batch size is too big we use gradient accumulation
    _snake_case : int = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        _snake_case : List[str] = batch_size // MAX_GPU_BATCH_SIZE
        _snake_case : Dict = MAX_GPU_BATCH_SIZE

    set_seed(a__ )
    _snake_case , _snake_case : Optional[Any] = get_dataloaders(a__ , a__ )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    _snake_case : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a__ )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    _snake_case : int = model.to(accelerator.device )

    # Instantiate optimizer
    _snake_case : Dict = AdamW(params=model.parameters() , lr=a__ )

    # Instantiate scheduler
    _snake_case : Any = get_linear_schedule_with_warmup(
        optimizer=a__ , num_warmup_steps=100 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = accelerator.prepare(
        a__ , a__ , a__ , a__ , a__ )

    # Now we train the model
    for epoch in range(a__ ):
        model.train()
        for step, batch in enumerate(a__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            _snake_case : Tuple = model(**a__ )
            _snake_case : int = outputs.loss
            # Scale the loss so accumulated gradients average correctly.
            _snake_case : Optional[int] = loss / gradient_accumulation_steps
            accelerator.backward(a__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        _snake_case : List[Any] = 0
        for step, batch in enumerate(a__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                _snake_case : Optional[Any] = model(**a__ )
            _snake_case : Union[str, Any] = outputs.logits.argmax(dim=-1 )
            _snake_case , _snake_case : Optional[Any] = accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(a__ ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    _snake_case : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    _snake_case : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=a__ , references=a__ , )

        _snake_case : List[str] = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , a__ )


def a__ ( ):
    """CLI entry point: parse ``--mixed_precision``/``--cpu`` and launch training."""
    _snake_case : int = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=a__ , default=a__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    _snake_case : Tuple = parser.parse_args()
    _snake_case : Optional[Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(a__ , a__ )


if __name__ == "__main__":
    main()
714
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a__ ( a : List[str] , a : Any ): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Any = flax_key_tuple[:-1] + ("weight",) _snake_case : str = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",) _snake_case : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ): """simple docstring""" if "metadata" in layer: _snake_case : Optional[int] = layer.split("metadata" ) _snake_case : Optional[int] = "".join(split_layer[0] )[:-1] _snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: _snake_case : Any = layer.split("kvstore" ) _snake_case : str = "".join(split_layer[0] )[:-1] _snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: _snake_case : List[Any] = layer.split("/" ) _snake_case : Tuple = "/".join(split_layer[:-1] ) _snake_case : int = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: _snake_case : Tuple = "file" else: _snake_case : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, 
content def a__ ( a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = rename_keys(a ) _snake_case : int = {} for k, v in current_block.items(): _snake_case : Optional[int] = v _snake_case : Optional[int] = new_current_block torch.save(a , a ) def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ): """simple docstring""" _snake_case : Any = convert_file_size_to_int(a ) _snake_case : Tuple = [] _snake_case : Optional[int] = {} _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: _snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] _snake_case : Optional[Any] = flatten_dict(a , sep="/" ) _snake_case : Optional[Any] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: _snake_case : Dict = content else: _snake_case : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Dict = torch.tensor(a ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) _snake_case : Optional[Any] = "/".join(a ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _snake_case : Any = os.path.join( a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : List[Any] = {} _snake_case : str = 0 _snake_case : List[str] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : str = {} _snake_case : Any = {} for idx, shard in enumerate(a ): _snake_case : Optional[int] = weights_name.replace( ".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(a , os.path.join(a , a ) ) _snake_case : Dict = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {"total_size": total_size} _snake_case : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: _snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) _a : Optional[int] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a__ ( ): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) _snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) _snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" ) _snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." _snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids _snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
87
0
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_=14 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=4 , snake_case_=4 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=0.02 , ): _snake_case : int = parent _snake_case : str = batch_size _snake_case : Optional[Any] = seq_length _snake_case : Dict = is_training _snake_case : Optional[Any] = use_input_mask _snake_case : Optional[int] = use_token_type_ids _snake_case : Union[str, Any] = use_labels _snake_case : List[str] = vocab_size _snake_case : Any = hidden_size _snake_case : str = rotary_dim _snake_case : Tuple = num_hidden_layers _snake_case : Optional[Any] = num_attention_heads _snake_case : Tuple = intermediate_size _snake_case : Dict = hidden_act _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Optional[Any] = max_position_embeddings _snake_case : Dict = initializer_range _snake_case : Union[str, Any] = None _snake_case : Union[str, Any] = vocab_size - 1 _snake_case : int = vocab_size - 1 _snake_case : Tuple = 
vocab_size - 1 def lowerCamelCase__ ( self ): _snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : int = None if self.use_input_mask: _snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : int = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCamelCase__ ( self ): _snake_case : Any = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : List[str] = config_and_inputs _snake_case : List[str] = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = 20 _snake_case : List[Any] = model_class_name(__A ) _snake_case : int = model.init_cache(input_ids.shape[0] , __A ) _snake_case : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" ) _snake_case : str = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _snake_case : List[str] = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _snake_case : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) _snake_case : Union[str, Any] = model( input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , ) _snake_case : Any = model(__A ) _snake_case : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , 
snake_case_ , snake_case_ ): _snake_case : str = 20 _snake_case : Optional[int] = model_class_name(__A ) _snake_case : Any = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) _snake_case : List[str] = model.init_cache(input_ids.shape[0] , __A ) _snake_case : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _snake_case : Dict = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _snake_case : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" ) _snake_case : int = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , ) _snake_case : Tuple = model(__A , attention_mask=__A ) _snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class _UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): __lowercase : List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase : List[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCamelCase__ ( self ): _snake_case : Any = FlaxGPTJModelTester(self ) def lowerCamelCase__ ( self ): for model_class_name in self.all_model_classes: _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__A , __A , __A , __A ) def lowerCamelCase__ ( self ): for model_class_name in self.all_model_classes: _snake_case , _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __A , __A , __A , __A ) @tooslow def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = 
GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" ) _snake_case : Tuple = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=__A , truncation=__A ) _snake_case : int = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" ) _snake_case : int = False _snake_case : List[Any] = model.config.eos_token_id _snake_case : List[str] = jax.jit(model.generate ) _snake_case : Dict = jit_generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences _snake_case : Union[str, Any] = tokenizer.batch_decode(__A , skip_special_tokens=__A ) _snake_case : str = [ "Hello this is a long string of text.\n\nI'm trying to get the text of the", "Hey, I'm a little late to the party. I'm going to", ] self.assertListEqual(__A , __A ) @is_pt_flax_cross_test def lowerCamelCase__ ( self ): _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _snake_case : Dict = self._prepare_for_class(__A , __A ) _snake_case : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _snake_case : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _snake_case : Union[str, Any] = getattr(__A , __A ) _snake_case , _snake_case : Any = pt_inputs["input_ids"].shape _snake_case : Optional[int] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _snake_case : List[Any] = 0 _snake_case : List[str] = 1 _snake_case : Tuple = 0 _snake_case : List[str] = 1 _snake_case : Optional[Any] = pt_model_class(__A ).eval() _snake_case : Optional[int] = model_class(__A , dtype=jnp.floataa ) _snake_case : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A ) _snake_case : Optional[int] = fx_state with 
torch.no_grad(): _snake_case : Dict = pt_model(**__A ).to_tuple() _snake_case : Dict = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__A ) _snake_case : str = model_class.from_pretrained(__A , from_pt=__A ) _snake_case : Optional[int] = fx_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCamelCase__ ( self ): _snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _snake_case : Dict = self._prepare_for_class(__A , __A ) _snake_case : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _snake_case : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning _snake_case : str = getattr(__A , __A ) _snake_case : Any = pt_model_class(__A ).eval() _snake_case : List[Any] = model_class(__A , dtype=jnp.floataa ) _snake_case : int = load_flax_weights_in_pytorch_model(__A , fx_model.params ) _snake_case , _snake_case : Dict = pt_inputs["input_ids"].shape _snake_case : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _snake_case : Any = 0 _snake_case : List[str] = 1 _snake_case : List[str] = 0 _snake_case : Tuple = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): _snake_case : str = pt_model(**__A ).to_tuple() _snake_case : str = fx_model(**__A 
).to_tuple() self.assertEqual(len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__A ) _snake_case : str = pt_model_class.from_pretrained(__A , from_flax=__A ) with torch.no_grad(): _snake_case : List[Any] = pt_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCamelCase__ ( self ): for model_class_name in self.all_model_classes: _snake_case : Dict = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" ) _snake_case : Optional[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
715
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ): _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
return inputs_dict class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _snake_case : Optional[Any] = parent _snake_case : List[Any] = batch_size _snake_case : Optional[int] = seq_length _snake_case : Dict = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : int = use_labels _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : List[Any] = num_labels _snake_case : Optional[int] = num_choices _snake_case : Optional[int] = scope _snake_case : Any = embedding_size def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Optional[Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : Dict = None _snake_case : Tuple = None _snake_case : str = None if self.use_labels: _snake_case : Union[str, Any] = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = TFMobileBertModel(config=snake_case_ ) _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) _snake_case : Union[str, Any] = [input_ids, input_mask] _snake_case : Optional[Any] = model(snake_case_ ) _snake_case : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[str] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Tuple = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = self.num_labels _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Any = self.num_choices _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : 
Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _snake_case : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.num_labels _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ ) _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ ) _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , 
config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase__ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" 
) _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : Union[str, Any] = model(snake_case_ )[0] _snake_case : int = [1, 6, 3_05_22] self.assertEqual(output.shape , snake_case_ ) _snake_case : Optional[Any] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
87
0
"""Matrix-chain multiplication: find the parenthesization of a chain of
matrices that minimizes the number of scalar multiplications (CLRS 15.2)."""
import sys


def matrix_chain_order(array):
    """Return ``(cost_matrix, split_matrix)`` for the dimension list *array*.

    Matrix ``A_i`` has shape ``array[i-1] x array[i]``.  ``cost_matrix[a][b]``
    is the minimal scalar-multiplication count for the product
    ``A_a ... A_b``; ``split_matrix[a][b]`` records the split index ``c``
    achieving it.

    Fixes the obfuscated original, in which all three functions were defined
    as ``a__`` while the call sites used the real names (NameError at runtime).
    """
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    # chain_length = number of matrices in the sub-product being solved.
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization for ``A_i ... A_j``."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """Demo with the classic CLRS six-matrix example."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Matrix sizes implied by the dimension list:
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
716
"""Lazy import structure for the BARTpho tokenizer.

Fixes the obfuscated original, where the import-structure dict was bound to
``_a`` while ``_LazyModule`` was called with the undefined name
``_import_structure`` (NameError on import), and the tokenizer entry was
never actually inserted into the dict.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    # BartphoTokenizer requires sentencepiece; expose it only when available.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""ViT MSN model configuration.

Fixes the obfuscated original: the constructor forwarded ``**__a`` (an
undefined name) to ``super().__init__`` although the kwargs parameter had a
different name, every keyword parameter was mangled to ``snake_case_`` while
the body read the real names, and the base class was the undefined
``UpperCamelCase_`` instead of the imported ``PretrainedConfig``.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class for a ViT-MSN model.

    Defaults mirror the ``sayakpaul/vit-msn-base`` checkpoint.  Unknown
    keyword arguments are forwarded to :class:`PretrainedConfig`.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        # Forward shared config options (id2label, torch_dtype, ...) upstream.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Whether the query/key/value projections carry a bias term.
        self.qkv_bias = qkv_bias
717
"""Linear search that narrows from both ends of the list at once.

Fixes the obfuscated original: all four parameters were named ``a`` while the
body referenced ``list_data``/``key``/``left``/``right``, and the recursive
call targeted ``search`` although the function was defined as ``a__``.
"""


def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Return the index of *key* in *list_data*, or ``-1`` if absent.

    *left* and *right* bound the still-unsearched slice; the default
    ``right=0`` (falsy) means "start from the last element".

    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    # A falsy *right* (the 0 default) is replaced by the last valid index;
    # for an empty list this yields -1, so the guard below returns -1.
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # Neither end matched: shrink the window by one from each side.
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
0
"""Convert a fairseq Wav2Vec2 encoder + Speech2Text2 decoder checkpoint into a
Hugging Face ``SpeechEncoderDecoderModel`` checkpoint.

Fixes the obfuscated original: every helper was defined as ``a__`` while the
call sites used the real names (``set_recursively``, ``load_conv_layer``,
``recursively_load_weights_wavaveca``, ``create_vocab_dict``,
``convert_wavaveca_checkpoint``), parameters were referenced through the
undefined ``_lowercase``, and attribute stores were mangled into dead local
assignments.  The mangled-but-consistent transformers class names
(``WavaVecaConfig`` etc.) are kept, since the import list defines them.
"""
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter path fragment -> HF module path ("*" is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the (possibly nested) attribute *key* of *hf_pointer*,
    asserting that the shapes agree first."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Port every fairseq encoder weight into *hf_model*.

    Returns the encoder->decoder projection layer when the fairseq model has
    one (i.e. when encoder and decoder dims differ), else ``None``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Port one fairseq conv feature-extractor tensor into *feature_extractor*;
    anything unrecognised is recorded in *unused_weights*."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build an ``nn.Linear`` sharing the weights of embedding *emb*."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build an HF vocab dict from a fairseq ``dict.txt`` file (first token per
    line is the word; fairseq ids are offset by the 4 special tokens)."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak a fairseq checkpoint into the HF design and save the
    resulting model, tokenizer, and feature extractor to
    *pytorch_dump_folder_path*."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(
        model.decoder.state_dict(), strict=False
    )

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
718
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_ ): _snake_case , _snake_case : Dict = text, pattern _snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self ): # searches pattern in text and returns index positions _snake_case : List[str] = [] for i in range(self.textLen - self.patLen + 1 ): _snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ ) if mismatch_index == -1: positions.append(snake_case_ ) else: _snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] ) _snake_case : Tuple = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _a : List[Any] = """ABAABA""" _a : str = """AB""" _a : List[Any] = BoyerMooreSearch(text, pattern) _a : Any = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
87
0
"""Measure a single qubit on Qiskit's Aer simulator and print the counts.

Fixes the obfuscated original: the function was defined as ``a__`` while the
demo called ``single_qubit_measure``, and both parameters were named ``a``
while the body referenced the undefined ``_lowerCamelCase``.
"""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Build a circuit with *qubits* quantum and *classical_bits* classical
    bits, measure qubit 0 into bit 0, and return the result histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
719
"""Download the Open Graph preview image (``og:image``) of a web page.

Fixes the obfuscated original: the import read ``from bsa import
BeautifulSoup`` — the package module is ``bs4``, so the script failed with
ModuleNotFoundError before doing anything.
"""
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    # Timestamped filename avoids clobbering earlier downloads.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
87
0
"""Public entry points for the UnCLIP pipelines.

Imports either the real pipeline classes or, when torch / a recent-enough
transformers is missing, placeholder dummies that raise a helpful error on
first use.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP needs torch plus transformers >= 4.25.0 for its text encoder.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependency missing: expose dummy objects so `from ... import UnCLIPPipeline`
    # still works and fails with an informative message only when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
720
"""Lazy import structure for the Pix2Struct model family.

Fixes the obfuscated original: the import-structure dict was bound to ``_a``
while ``_LazyModule`` received the undefined ``_import_structure`` (NameError
on import), the vision/torch-conditional entries were never inserted into the
dict, and the TYPE_CHECKING imports used the mangled ``pixastruct`` /
``PixaStruct`` names instead of the ``pix2struct`` names declared in the
import structure.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    # The image processor needs PIL/vision extras.
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    # Modeling code needs torch.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""A minimal two-cluster self-organizing map (Kohonen network) demo.

Fixes the obfuscated original: both distance accumulators were mangled to the
single name ``da`` so the winner test read ``da > da`` (always False), the
methods were defined as ``lowerCamelCase__`` while ``main`` called
``get_winner``/``update`` (AttributeError), and ``update`` iterated over the
wrong length.
"""
import math


class SelfOrganizingMap:
    """Competitive-learning map with exactly two weight vectors."""

    def get_winner(self, weights: list[list[float]], sample: list[float]) -> int:
        """Return the index (0 or 1) of the weight vector closest to *sample*
        by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The smaller distance wins the competition.
        return 0 if d0 < d1 else 1

    def update(
        self,
        weights: list[list[float]],
        sample: list[float],
        j: int,
        alpha: float,
    ) -> list[list[float]]:
        """Move winning vector *j* a fraction *alpha* toward *sample*.

        Mutates *weights* in place and returns it for convenience.
        """
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """Train on four binary samples, then classify a held-out sample."""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
721
"""Report GitHub Actions self-hosted runners that are offline.

Fixes the obfuscated original: both functions were defined as ``a__`` while
the entry point called ``get_runner_status`` and argparse used ``list_str``
(NameError), and ``subprocess.run`` was called with ``shell=a`` (undefined).
"""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """Query the GitHub API (via curl) for the repo's runners and raise if any
    runner named in *target_runners* is offline.

    Offline runner records are also dumped to ``offline_runners.txt`` so they
    can be reported on Slack.
    """
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` hook: comma-separated string -> list of names.
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
87
0
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def a__ ( a : str , a : List[str] , a : List[Any] ): """simple docstring""" if isinstance(_lowerCamelCase , torch.Tensor ): return image elif isinstance(_lowerCamelCase , PIL.Image.Image ): _snake_case : Optional[Any] = [image] if isinstance(image[0] , PIL.Image.Image ): _snake_case : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] _snake_case : Union[str, Any] = np.concatenate(_lowerCamelCase , axis=0 ) _snake_case : int = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0 _snake_case : str = image.transpose(0 , 3 , 1 , 2 ) _snake_case : Any = 2.0 * image - 1.0 _snake_case : List[Any] = torch.from_numpy(_lowerCamelCase ) elif isinstance(image[0] , torch.Tensor ): _snake_case : Dict = torch.cat(_lowerCamelCase , dim=0 ) return image def a__ ( a : Tuple , a : Union[str, Any] , a : List[str] , a : List[str]=0.9995 ): """simple docstring""" if not isinstance(_lowerCamelCase , np.ndarray ): _snake_case : Dict = True _snake_case : Union[str, Any] = va.device _snake_case : Optional[Any] = va.cpu().numpy() _snake_case : List[Any] = va.cpu().numpy() _snake_case : Any = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) ) if np.abs(_lowerCamelCase ) > DOT_THRESHOLD: _snake_case : Any = (1 - t) * va + t * va else: _snake_case : List[str] = np.arccos(_lowerCamelCase ) _snake_case : str = 
np.sin(_lowerCamelCase ) _snake_case : Dict = theta_a * t _snake_case : Union[str, Any] = np.sin(_lowerCamelCase ) _snake_case : Optional[int] = np.sin(theta_a - theta_t ) / sin_theta_a _snake_case : Optional[Any] = sin_theta_t / sin_theta_a _snake_case : int = sa * va + sa * va if inputs_are_torch: _snake_case : Optional[Any] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase ) return va def a__ ( a : str , a : Any ): """simple docstring""" _snake_case : str = F.normalize(_lowerCamelCase , dim=-1 ) _snake_case : Optional[int] = F.normalize(_lowerCamelCase , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def a__ ( a : Dict , a : Any ): """simple docstring""" for param in model.parameters(): _snake_case : Tuple = value class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , ): super().__init__() self.register_modules( vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , clip_model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , coca_model=UpperCamelCase__ , coca_tokenizer=UpperCamelCase__ , coca_transform=UpperCamelCase__ , ) _snake_case : Optional[int] = ( feature_extractor.size if isinstance(feature_extractor.size , UpperCamelCase__ ) else feature_extractor.size["shortest_edge"] ) _snake_case : str = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , UpperCamelCase__ ) set_requires_grad(self.clip_model , UpperCamelCase__ ) def lowerCamelCase__ ( self , snake_case_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _snake_case : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCamelCase__ ) def 
lowerCamelCase__ ( self ): self.enable_attention_slicing(UpperCamelCase__ ) def lowerCamelCase__ ( self ): set_requires_grad(self.vae , UpperCamelCase__ ) def lowerCamelCase__ ( self ): set_requires_grad(self.vae , UpperCamelCase__ ) def lowerCamelCase__ ( self ): set_requires_grad(self.unet , UpperCamelCase__ ) def lowerCamelCase__ ( self ): set_requires_grad(self.unet , UpperCamelCase__ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = min(int(num_inference_steps * strength ) , UpperCamelCase__ ) _snake_case : Tuple = max(num_inference_steps - init_timestep , 0 ) _snake_case : Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): if not isinstance(UpperCamelCase__ , torch.Tensor ): raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase__ )}' ) _snake_case : str = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case : Union[str, Any] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ ) ] _snake_case : List[str] = torch.cat(UpperCamelCase__ , dim=0 ) else: _snake_case : Union[str, Any] = self.vae.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _snake_case : List[Any] = 0.18215 * init_latents _snake_case : List[str] = init_latents.repeat_interleave(UpperCamelCase__ , dim=0 ) _snake_case : Tuple = randn_tensor(init_latents.shape , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ ) # get latents _snake_case : int = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _snake_case : int = init_latents return latents def lowerCamelCase__ ( self , 
snake_case_ ): _snake_case : Any = self.coca_transform(UpperCamelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _snake_case : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _snake_case : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): _snake_case : int = self.feature_extractor.preprocess(UpperCamelCase__ ) _snake_case : List[str] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() _snake_case : str = self.clip_model.get_image_features(UpperCamelCase__ ) _snake_case : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__ ) _snake_case : Tuple = image_embeddings_clip.repeat_interleave(UpperCamelCase__ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ): _snake_case : Any = latents.detach().requires_grad_() _snake_case : List[Any] = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) # predict the noise residual _snake_case : Union[str, Any] = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _snake_case : Any = self.scheduler.alphas_cumprod[timestep] _snake_case : Optional[Any] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case : Optional[Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _snake_case : Dict = torch.sqrt(UpperCamelCase__ ) _snake_case : Optional[int] = pred_original_sample * 
(fac) + latents * (1 - fac) elif isinstance(self.scheduler , UpperCamelCase__ ): _snake_case : int = self.scheduler.sigmas[index] _snake_case : Tuple = latents - sigma * noise_pred else: raise ValueError(F'scheduler type {type(self.scheduler )} not supported' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _snake_case : Dict = 1 / 0.18215 * sample _snake_case : int = self.vae.decode(UpperCamelCase__ ).sample _snake_case : Dict = (image / 2 + 0.5).clamp(0 , 1 ) _snake_case : Dict = transforms.Resize(self.feature_extractor_size )(UpperCamelCase__ ) _snake_case : Dict = self.normalize(UpperCamelCase__ ).to(latents.dtype ) _snake_case : Union[str, Any] = self.clip_model.get_image_features(UpperCamelCase__ ) _snake_case : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__ ) _snake_case : Optional[Any] = spherical_dist_loss(UpperCamelCase__ , UpperCamelCase__ ).mean() * clip_guidance_scale _snake_case : Union[str, Any] = -torch.autograd.grad(UpperCamelCase__ , UpperCamelCase__ )[0] if isinstance(self.scheduler , UpperCamelCase__ ): _snake_case : Optional[Any] = latents.detach() + grads * (sigma**2) _snake_case : Tuple = noise_pred_original else: _snake_case : int = noise_pred_original - torch.sqrt(UpperCamelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = 5_12 , snake_case_ = 5_12 , snake_case_ = 0.6 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1_00 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = 0.8 , snake_case_ = 0.1 , snake_case_ = 0.1 , ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size: raise ValueError(F'You have passed {batch_size} batch_size, but only {len(UpperCamelCase__ )} generators.' 
) if height % 8 != 0 or width % 8 != 0: raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' ) if isinstance(UpperCamelCase__ , torch.Generator ) and batch_size > 1: _snake_case : Dict = [generator] + [None] * (batch_size - 1) _snake_case : int = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] _snake_case : Tuple = [x[0] for x in coca_is_none if x[1]] _snake_case : List[str] = ", ".join(UpperCamelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCamelCase__ ): raise ValueError( F'Content prompt is None and CoCa [{coca_is_none_str}] is None.' F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' ) _snake_case : Tuple = self.get_image_description(UpperCamelCase__ ) if style_prompt is None: if len(UpperCamelCase__ ): raise ValueError( F'Style prompt is None and CoCa [{coca_is_none_str}] is None.' F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' 
) _snake_case : Union[str, Any] = self.get_image_description(UpperCamelCase__ ) # get prompt text embeddings for content and style _snake_case : List[str] = self.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="pt" , ) _snake_case : List[str] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _snake_case : int = self.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="pt" , ) _snake_case : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _snake_case : str = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # duplicate text embeddings for each generation per prompt _snake_case : Tuple = text_embeddings.repeat_interleave(UpperCamelCase__ , dim=0 ) # set timesteps _snake_case : Optional[Any] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _snake_case : str = {} if accepts_offset: _snake_case : Tuple = 1 self.scheduler.set_timesteps(UpperCamelCase__ , **UpperCamelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _snake_case , _snake_case : List[Any] = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , self.device ) _snake_case : Any = timesteps[:1].repeat(UpperCamelCase__ ) # Preprocess image _snake_case : List[str] = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _snake_case : int = self.prepare_latents( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , UpperCamelCase__ ) _snake_case : Dict = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _snake_case : Dict = self.prepare_latents( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , 
UpperCamelCase__ ) _snake_case : List[str] = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if clip_guidance_scale > 0: _snake_case : Optional[int] = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__ ) _snake_case : List[Any] = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__ ) _snake_case : str = slerp( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _snake_case : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _snake_case : List[str] = content_text_input.input_ids.shape[-1] _snake_case : Optional[Any] = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="pt" ) _snake_case : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _snake_case : Any = uncond_embeddings.repeat_interleave(UpperCamelCase__ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _snake_case : Tuple = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_snake_case : Optional[Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _snake_case : Tuple = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _snake_case : Tuple = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="cpu" , dtype=UpperCamelCase__ ).to( self.device ) else: _snake_case : Optional[int] = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) _snake_case : int = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _snake_case : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _snake_case : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _snake_case : Optional[int] = {} if accepts_eta: _snake_case : Any = eta # check if the scheduler accepts generator _snake_case : str = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _snake_case : Union[str, Any] = generator with self.progress_bar(total=UpperCamelCase__ ): for i, t in enumerate(UpperCamelCase__ ): # expand the latents if we are doing classifier free guidance _snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _snake_case : Tuple = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) # predict the noise residual _snake_case : Tuple = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: _snake_case , _snake_case : Dict = noise_pred.chunk(2 ) _snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _snake_case : str = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _snake_case , _snake_case : Union[str, Any] = self.cond_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) # compute the previous noisy sample x_t -> x_t-1 _snake_case : List[Any] = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _snake_case : Optional[int] = 1 / 0.18215 * latents _snake_case : Dict = self.vae.decode(UpperCamelCase__ ).sample _snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 ) _snake_case : 
List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _snake_case : List[str] = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__ )
700
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _snake_case : List[Any] = Vector() def lowerCamelCase__ ( self ): _snake_case : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 2, 3, 4] ) self.assertEqual(len(snake_case_ ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2] ) _snake_case : List[str] = Vector([1, 2, 3, 4, 5] ) _snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) _snake_case : Any = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : str = Vector([1, 2, 3] ) _snake_case : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Vector([1, 2, 3] ) _snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product _snake_case : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase__ ( self ): self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 ) def lowerCamelCase__ ( self ): 
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Vector([1, 2, 3] ) _snake_case : Optional[Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] ) _snake_case : Optional[int] = x.copy() self.assertEqual(str(snake_case_ ) , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(snake_case_ ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _snake_case : List[str] = Vector([1, 2, 3] ) self.assertEqual("(14,32,50)" , str(a * x ) ) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , 
str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) ) def lowerCamelCase__ ( self ): self.assertEqual( "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
87
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _a : List[Any] = logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( snake_case_): __lowercase : List[str] = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **snake_case_ ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _snake_case : List[Any] = deprecated_arg[3:] _snake_case : Tuple = not kwargs.pop(snake_case_ ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) _snake_case : Any = kwargs.pop("tpu_name" , self.tpu_name ) _snake_case : Optional[Any] = kwargs.pop("device_idx" , self.device_idx ) _snake_case : Optional[int] = kwargs.pop("eager_mode" , self.eager_mode ) _snake_case : Tuple = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**snake_case_ ) __lowercase : str = field( default=snake_case_ , metadata={"""help""": """Name of TPU"""} , ) __lowercase : int = field( default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , ) __lowercase : bool = field(default=snake_case_ , metadata={"""help""": """Benchmark models in eager model."""}) __lowercase : bool = field( default=snake_case_ , metadata={ """help""": """Benchmark models using XLA JIT compilation. 
Note that `eager_model` has to be set to `False`.""" } , ) @cached_property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) _snake_case : Tuple = None if self.tpu: try: if self.tpu_name: _snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _snake_case : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _snake_case : int = None return tpu @cached_property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _snake_case : Dict = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) _snake_case : Optional[int] = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU _snake_case : str = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' ) return strategy @property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) return self._setup_strategy @property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def lowerCamelCase__ ( self ): requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def lowerCamelCase__ ( self ): return self.n_gpu > 0
701
"""simple docstring""" from __future__ import annotations from collections import namedtuple def a__ ( a : float , a : float , a : float ): """simple docstring""" _snake_case : Optional[Any] = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _a : Union[str, Any] = """bart""" _a : str = True @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def a__ ( ): """simple docstring""" if LOAD_DENSE_INDEX: _snake_case : Dict = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) _snake_case : Optional[int] = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) _snake_case : int = qar_model.eval() else: _snake_case : Union[str, Any] = (None, None) if MODEL_TYPE == "bart": _snake_case : List[str] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) _snake_case : Optional[int] = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) _snake_case : List[str] = sas_model.eval() else: _snake_case : List[str] = make_qa_sas_model( model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def a__ ( ): """simple docstring""" if LOAD_DENSE_INDEX: _snake_case : List[Any] = faiss.StandardGpuResources() _snake_case : List[str] = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"] _snake_case : Any = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , ) _snake_case : Any = faiss.IndexFlatIP(128 ) _snake_case : int = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ ) 
wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE_ ) # TODO fix for larger GPU else: _snake_case : str = (None, None) _snake_case : Tuple = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE_ ) def a__ ( ): """simple docstring""" _snake_case : str = datasets.load_dataset("eli5" , name="LFQA_reddit" ) _snake_case : Tuple = elia["train_eli5"] _snake_case : Tuple = np.memmap( "eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) ) _snake_case : Dict = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(SCREAMING_SNAKE_CASE_ ) return (elia_train, eli5_train_q_index) _a, _a, _a : int = load_indexes() _a, _a, _a, _a : List[Any] = load_models() _a, _a : Optional[Any] = load_train_data() def a__ ( a : Union[str, Any] , a : Any=10 ): """simple docstring""" _snake_case : List[str] = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _snake_case : List[str] = eli5_train_q_index.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _snake_case : str = [elia_train[int(SCREAMING_SNAKE_CASE_ )] for i in I[0]] return nn_examples def a__ ( a : Optional[Any] , a : Union[str, Any]="wiki40b" , a : List[str]="dense" , a : Tuple=10 ): """simple docstring""" if source == "none": _snake_case : List[Any] = (" <P> ".join(["" for _ in range(11 )] ).strip(), []) else: if method == "dense": _snake_case : Optional[int] = query_qa_dense_index( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: _snake_case : Dict = query_es_index( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index_name="english_wiki40b_snippets_100w" , n_results=SCREAMING_SNAKE_CASE_ , ) _snake_case : Tuple = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] _snake_case : List[Any] = 
"question: {} context: {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda a : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None), } ) def a__ ( a : str , a : Any , a : str , a : Optional[int]=64 , a : Union[str, Any]=256 , a : List[str]=False , a : Union[str, Any]=2 , a : Union[str, Any]=0.95 , a : int=0.8 ): """simple docstring""" with torch.no_grad(): _snake_case : Optional[Any] = qa_sas_generate( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE_ , min_len=SCREAMING_SNAKE_CASE_ , max_len=SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , temp=SCREAMING_SNAKE_CASE_ , top_p=SCREAMING_SNAKE_CASE_ , top_k=SCREAMING_SNAKE_CASE_ , max_input_length=1_024 , device="cuda:0" , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar _a : Tuple = """<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>""" _a : Any = """\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n""" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _a : Any = """\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n""" st.sidebar.markdown(description, unsafe_allow_html=True) _a : Optional[int] = [ """Answer the question""", """View the retrieved 
document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] _a : Optional[int] = st.sidebar.checkbox("""Demo options""") if demo_options: _a : str = st.sidebar.selectbox( """""", action_list, index=3, ) _a : Tuple = action_list.index(action_st) _a : Any = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) _a : str = show_type == """Show full text of passages""" else: _a : int = 3 _a : Union[str, Any] = True _a : int = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: _a : Union[str, Any] = """\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n """ st.sidebar.markdown(retriever_info) _a : Union[str, Any] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) _a : Tuple = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: _a : Optional[Any] = """wiki40b""" _a : List[str] = """dense""" _a : str = """beam""" _a : List[str] = 2 _a : Union[str, Any] = 64 _a : Tuple = 256 _a : Any = None _a : Optional[int] = None _a : Tuple = st.sidebar.checkbox("""Generation options""") if generate_options: _a : List[str] = """\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. 
You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n """ st.sidebar.markdown(generate_info) _a : List[str] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) _a : Tuple = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) _a : Tuple = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": _a : str = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _a : List[str] = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _a : str = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _a : List[Any] = None # start main text _a : Any = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What\'s the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What\'s the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? 
Doesn\'t the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] _a : int = st.selectbox( """What would you like to ask? ---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": _a : Optional[Any] = st.text_input("""Enter your question here:""", """""") else: _a : Dict = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": _a, _a : List[str] = make_support(question, source=wiki_source, method="""dense""", n_results=10) _a, _a : Any = make_support(question, source=wiki_source, method="""sparse""", n_results=10) _a : List[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _a : Optional[int] = support_list[:10] _a : Any = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: _a, _a : str = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: _a, _a : int = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): _a : str = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) _a : Tuple = res[1].strip() if sec_titles == "": _a : Optional[Any] = """[{}]({})""".format(res[0], wiki_url) else: _a : Optional[Any] = sec_titles.split(""" & """) _a : List[Any] = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: 
{1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: _a : Union[str, Any] = find_nearest_training(question) _a : Union[str, Any] = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) _a : List[Any] = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) _a : Union[str, Any] = """\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n""" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def a__ ( a : int ): """simple docstring""" _snake_case : Optional[int] = fname.split(os.path.sep )[-1] return re.search(R"^(.*)_\d+\.jpg$" , SCREAMING_SNAKE_CASE_ ).groups()[0] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=None , snake_case_=None ): _snake_case : Optional[int] = file_names _snake_case : str = image_transform _snake_case : Tuple = label_to_id def __len__( self ): return len(self.file_names ) def __getitem__( self , snake_case_ ): _snake_case : Tuple = self.file_names[idx] _snake_case : int = PIL.Image.open(snake_case_ ) _snake_case : Optional[Any] = raw_image.convert("RGB" ) if self.image_transform is not None: _snake_case : Any = self.image_transform(snake_case_ ) _snake_case : Dict = extract_label(snake_case_ ) if self.label_to_id is not None: _snake_case : Optional[Any] = self.label_to_id[label] return {"image": image, "label": label} def a__ ( a : Dict , a : str ): """simple docstring""" if args.with_tracking: _snake_case : List[str] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: _snake_case : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Union[str, Any] = config['lr'] _snake_case : str = int(config["num_epochs"] ) _snake_case : List[Any] = int(config["seed"] ) _snake_case : Optional[int] = int(config["batch_size"] ) _snake_case : List[str] = config['image_size'] if not isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): _snake_case : str = (image_size, image_size) # 
Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , "isdigit" ): if args.checkpointing_steps == "epoch": _snake_case : List[Any] = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _snake_case : List[Any] = int(args.checkpointing_steps ) else: raise ValueError( f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' ) else: _snake_case : Union[str, Any] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _snake_case : Optional[Any] = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split("." )[0] accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Grab all the image filenames _snake_case : List[Any] = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE_ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )] # Build the label correspondences _snake_case : Optional[int] = [extract_label(SCREAMING_SNAKE_CASE_ ) for fname in file_names] _snake_case : Tuple = list(set(SCREAMING_SNAKE_CASE_ ) ) id_to_label.sort() _snake_case : int = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE_ )} # Set the seed before splitting the data. 
np.random.seed(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(SCREAMING_SNAKE_CASE_ ) torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE_ ) # Split our filenames between train and validation _snake_case : Any = np.random.permutation(len(SCREAMING_SNAKE_CASE_ ) ) _snake_case : List[str] = int(0.8 * len(SCREAMING_SNAKE_CASE_ ) ) _snake_case : str = random_perm[:cut] _snake_case : Tuple = random_perm[cut:] # For training we use a simple RandomResizedCrop _snake_case : int = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.5, 1.0) ), ToTensor()] ) _snake_case : Optional[Any] = PetsDataset( [file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ ) # For evaluation, we use a deterministic Resize _snake_case : List[str] = Compose([Resize(SCREAMING_SNAKE_CASE_ ), ToTensor()] ) _snake_case : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ ) # Instantiate dataloaders. _snake_case : Dict = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 ) _snake_case : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : Tuple = create_model("resnet50d" , pretrained=SCREAMING_SNAKE_CASE_ , num_classes=len(SCREAMING_SNAKE_CASE_ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_snake_case : List[Any] = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _snake_case : Any = False for param in model.get_classifier().parameters(): _snake_case : List[Any] = True # We normalize the batches of images to be a bit faster. _snake_case : Optional[int] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device ) _snake_case : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _snake_case : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _snake_case : str = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE_ , max_lr=SCREAMING_SNAKE_CASE_ , epochs=SCREAMING_SNAKE_CASE_ , steps_per_epoch=len(SCREAMING_SNAKE_CASE_ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case : List[str] = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # We need to keep track of how many total steps we have iterated over _snake_case : str = 0 # We also need to keep track of the starting epoch so files are named properly _snake_case : List[str] = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' ) accelerator.load_state(args.resume_from_checkpoint ) _snake_case : List[Any] = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _snake_case : str = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _snake_case : str = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or 
`step_{i}` _snake_case : Optional[int] = os.path.splitext(SCREAMING_SNAKE_CASE_ )[0] if "epoch" in training_difference: _snake_case : Union[str, Any] = int(training_difference.replace("epoch_" , "" ) ) + 1 _snake_case : Tuple = None else: _snake_case : Any = int(training_difference.replace("step_" , "" ) ) _snake_case : int = resume_step // len(SCREAMING_SNAKE_CASE_ ) resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE_ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): model.train() if args.with_tracking: _snake_case : Tuple = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _snake_case : str = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _snake_case : List[Any] = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
_snake_case : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()} _snake_case : Dict = (batch['image'] - mean) / std _snake_case : Any = model(SCREAMING_SNAKE_CASE_ ) _snake_case : Any = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE_ , batch["label"] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(SCREAMING_SNAKE_CASE_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _snake_case : Optional[Any] = f'step_{overall_step}' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _snake_case : Union[str, Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) model.eval() _snake_case : Optional[int] = 0 _snake_case : Optional[int] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. _snake_case : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()} _snake_case : str = (batch['image'] - mean) / std with torch.no_grad(): _snake_case : List[str] = model(SCREAMING_SNAKE_CASE_ ) _snake_case : Dict = outputs.argmax(dim=-1 ) _snake_case : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["label"]) ) _snake_case : Optional[Any] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _snake_case : List[Any] = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' ) if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(SCREAMING_SNAKE_CASE_ ), "epoch": epoch, } , step=SCREAMING_SNAKE_CASE_ , ) if checkpointing_steps == "epoch": _snake_case : List[str] = f'epoch_{epoch}' if args.output_dir is not None: _snake_case : int = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) if args.with_tracking: accelerator.end_training() def a__ ( ): """simple docstring""" _snake_case : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument("--data_dir" , required=SCREAMING_SNAKE_CASE_ , help="The data folder on disk." ) parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." ) parser.add_argument( "--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--checkpointing_steps" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch." , ) parser.add_argument( "--output_dir" , type=SCREAMING_SNAKE_CASE_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="If the training should continue from a checkpoint folder." 
, ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=SCREAMING_SNAKE_CASE_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) _snake_case : Optional[int] = parser.parse_args() _snake_case : List[Any] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224} training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
703
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _snake_case): __lowercase : int = """EncodecFeatureExtractor""" __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , snake_case_ , snake_case_ ): super().__init__(snake_case_ , snake_case_ ) _snake_case : Dict = self.feature_extractor _snake_case : Any = False def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ): return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ ) def __call__( self , *snake_case_ , **snake_case_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*snake_case_ , **snake_case_ ) _snake_case : str = kwargs.pop("audio" , snake_case_ ) _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ ) _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Any = args[0] _snake_case : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if text is not None: _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ ) if audio is not None: _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ ) if audio is None: return inputs elif text is None: return audio_inputs else: _snake_case : str = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _snake_case : List[str] = audio_inputs["padding_mask"] return inputs def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): _snake_case : Tuple = kwargs.pop("audio" , snake_case_ ) _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Tuple = args[0] _snake_case : Dict = args[1:] if audio_values is not None: return self._decode_audio(snake_case_ , padding_mask=snake_case_ ) else: return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[int] = to_numpy(snake_case_ ) _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape if padding_mask is None: return list(snake_case_ ) _snake_case : Optional[int] = to_numpy(snake_case_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _snake_case : Any = seq_len - padding_mask.shape[-1] _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ ) _snake_case : Any = audio_values.tolist() for i in range(snake_case_ ): _snake_case : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _snake_case : Tuple = sliced_audio.reshape(snake_case_ 
, -1 ) return audio_values
87
0
"""Lazy-import scaffolding for the Blenderbot-Small model family.

Heavy, framework-specific submodules (PyTorch / TensorFlow / Flax models,
fast tokenizers) are only imported on first attribute access via
``_LazyModule``.  Fix: the import-structure dict and the per-backend list
attachments had been renamed to a throwaway variable, leaving
``_import_structure`` (consumed by ``_LazyModule``) undefined.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> list of public symbols it provides.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

# Each optional backend only registers its symbols when the dependency is
# actually installed; otherwise the submodule is silently omitted.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["""YolosFeatureExtractor"""] _a : List[Any] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def a__ ( a : List[str] ): """simple docstring""" def is_in_circle(a : Tuple , a : List[str] ) -> bool: _snake_case : Tuple = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle _snake_case : Optional[Any] = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(__snake_case ) ) # The ratio of the area for circle to square is pi/4. _snake_case : Tuple = proportion * 4 print(f'The estimated value of pi is {pi_estimate}' ) print(f'The numpy value of pi is {pi}' ) print(f'The total error is {abs(pi - pi_estimate )}' ) def a__ ( a : Dict , a : Union[str, Any] , a : Any = 0.0 , a : Optional[Any] = 1.0 , ): """simple docstring""" return mean( function_to_integrate(uniform(__snake_case , __snake_case ) ) for _ in range(__snake_case ) ) * (max_value - min_value) def a__ ( a : Optional[Any] , a : Dict = 0.0 , a : List[str] = 1.0 ): """simple docstring""" def identity_function(a : str ) -> float: return x _snake_case : Dict = area_under_curve_estimator( __snake_case , __snake_case , __snake_case , __snake_case ) _snake_case : Optional[int] = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f'Estimating area under y=x where x varies from {min_value} to {max_value}' ) print(f'Estimated value is {estimated_value}' ) print(f'Expected value is {expected_value}' ) print(f'Total error is {abs(estimated_value - expected_value )}' ) print("******************" ) def a__ ( a : int ): """simple docstring""" def function_to_integrate(a : Tuple ) -> float: return sqrt(4.0 - x * x ) _snake_case : Any = area_under_curve_estimator( __snake_case , __snake_case , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using 
area_under_curve_estimator" ) print(f'Estimated value is {estimated_value}' ) print(f'Expected value is {pi}' ) print(f'Total error is {abs(estimated_value - pi )}' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
705
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those 
are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
0
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments lowerCamelCase_ : Dict = logging.getLogger(__name__) @dataclass class _UpperCAmelCase ( __a): __lowercase : Optional[float] = field( default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""}) __lowercase : bool = field(default=__a , metadata={"""help""": """Whether to SortishSamler or not."""}) __lowercase : bool = field( default=__a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""}) __lowercase : bool = field(default=__a , metadata={"""help""": """whether to use adafactor"""}) __lowercase : Optional[float] = field( default=__a , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""}) __lowercase : Optional[float] = field( default=__a , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""}) __lowercase : Optional[float] = field(default=__a , metadata={"""help""": """Dropout probability. Goes into model.config."""}) __lowercase : Optional[float] = field( default=__a , metadata={"""help""": """Attention dropout probability. Goes into model.config."""}) __lowercase : Optional[str] = field( default="""linear""" , metadata={"""help""": F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'} , )
706
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Union[str, Any] = logging.get_logger(__name__) _a : Union[str, Any] = { """SCUT-DLVCLab/lilt-roberta-en-base""": ( """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json""" ), } class _UpperCAmelCase ( __lowerCamelCase): __lowercase : Tuple = '''lilt''' def __init__( self , snake_case_=3_05_22 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0 , snake_case_="absolute" , snake_case_=None , snake_case_=4 , snake_case_=10_24 , **snake_case_ , ): super().__init__(pad_token_id=a_ , **a_ ) _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : List[str] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Union[str, Any] = hidden_act _snake_case : List[Any] = intermediate_size _snake_case : Dict = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : Union[str, Any] = type_vocab_size _snake_case : int = initializer_range _snake_case : Optional[int] = layer_norm_eps _snake_case : Tuple = position_embedding_type _snake_case : Union[str, Any] = classifier_dropout _snake_case : List[str] = channel_shrink_ratio _snake_case : Union[str, Any] = max_ad_position_embeddings
707
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
0
"""simple docstring""" def a__ ( a : List[Any] , a : str ): """simple docstring""" _snake_case : List[str] = len(_snake_case ) + 1 _snake_case : int = len(_snake_case ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. _snake_case : Optional[Any] = [[0 for i in range(_snake_case )] for j in range(_snake_case )] # since string of zero length match pattern of zero length _snake_case : Tuple = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _snake_case ): _snake_case : int = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _snake_case ): _snake_case : Union[str, Any] = dp[0][j - 2] if pattern[j - 1] == "*" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _snake_case ): for j in range(1 , _snake_case ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": _snake_case : str = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: _snake_case : Any = 1 elif pattern[j - 2] in (input_string[i - 1], "."): _snake_case : int = dp[i - 1][j] else: _snake_case : Optional[int] = 0 else: _snake_case : str = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") _a : Optional[int] = """aab""" _a : Optional[Any] = """c*a*b""" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f'{input_string} matches the given pattern {pattern}') else: print(f'{input_string} does not match with the given pattern {pattern}')
708
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
87
0
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _a : Optional[int] = logging.get_logger(__name__) # General docstring _a : Any = 'RegNetConfig' # Base docstring _a : Any = 'facebook/regnet-y-040' _a : str = [1, 1_088, 7, 7] # Image classification docstring _a : Optional[int] = 'facebook/regnet-y-040' _a : Any = 'tabby, tabby cat' _a : Any = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ = 3 , snake_case_ = 1 , snake_case_ = 1 , snake_case_ = "relu" , ): super().__init__() _snake_case : str = nn.Convad( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 , groups=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , ) _snake_case : Any = nn.BatchNormad(_SCREAMING_SNAKE_CASE ) _snake_case : Tuple = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCamelCase__ ( self , snake_case_ ): _snake_case : List[str] = self.convolution(_SCREAMING_SNAKE_CASE ) _snake_case : List[str] = self.normalization(_SCREAMING_SNAKE_CASE ) _snake_case : Dict = self.activation(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ ): super().__init__() _snake_case : str = RegNetConvLayer( config.num_channels , config.embedding_size 
, kernel_size=3 , stride=2 , activation=config.hidden_act ) _snake_case : int = config.num_channels def lowerCamelCase__ ( self , snake_case_ ): _snake_case : List[str] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) _snake_case : int = self.embedder(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ = 2 ): super().__init__() _snake_case : int = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , stride=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) _snake_case : Union[str, Any] = nn.BatchNormad(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : List[Any] = self.convolution(_SCREAMING_SNAKE_CASE ) _snake_case : str = self.normalization(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ ): super().__init__() _snake_case : Any = nn.AdaptiveAvgPoolad((1, 1) ) _snake_case : List[Any] = nn.Sequential( nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.ReLU() , nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ) , nn.Sigmoid() , ) def lowerCamelCase__ ( self , snake_case_ ): # b c h w -> b c 1 1 _snake_case : str = self.pooler(_SCREAMING_SNAKE_CASE ) _snake_case : Any = self.attention(_SCREAMING_SNAKE_CASE ) _snake_case : Union[str, Any] = hidden_state * attention return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 ): super().__init__() _snake_case : List[Any] = in_channels != out_channels or stride != 1 _snake_case : Dict = max(1 , out_channels // config.groups_width ) _snake_case : Any = ( RegNetShortCut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) 
if should_apply_shortcut else nn.Identity() ) _snake_case : int = nn.Sequential( RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , activation=_SCREAMING_SNAKE_CASE ) , ) _snake_case : List[str] = ACTaFN[config.hidden_act] def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Any = hidden_state _snake_case : Dict = self.layer(_SCREAMING_SNAKE_CASE ) _snake_case : Union[str, Any] = self.shortcut(_SCREAMING_SNAKE_CASE ) hidden_state += residual _snake_case : List[str] = self.activation(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 ): super().__init__() _snake_case : List[Any] = in_channels != out_channels or stride != 1 _snake_case : Union[str, Any] = max(1 , out_channels // config.groups_width ) _snake_case : Union[str, Any] = ( RegNetShortCut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity() ) _snake_case : Any = nn.Sequential( RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , RegNetSELayer(_SCREAMING_SNAKE_CASE , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , activation=_SCREAMING_SNAKE_CASE ) , ) _snake_case : Tuple = ACTaFN[config.hidden_act] def lowerCamelCase__ ( self , snake_case_ ): _snake_case : str = hidden_state _snake_case : Optional[Any] = 
self.layer(_SCREAMING_SNAKE_CASE ) _snake_case : Optional[Any] = self.shortcut(_SCREAMING_SNAKE_CASE ) hidden_state += residual _snake_case : List[str] = self.activation(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 2 , snake_case_ = 2 , ): super().__init__() _snake_case : List[Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer _snake_case : Optional[Any] = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , ) , *[layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(depth - 1 )] , ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Any = self.layers(_SCREAMING_SNAKE_CASE ) return hidden_state class _UpperCAmelCase ( nn.Module): def __init__( self , snake_case_ ): super().__init__() _snake_case : List[Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case : str = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_SCREAMING_SNAKE_CASE , config.depths[1:] ): self.stages.append(RegNetStage(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , depth=_SCREAMING_SNAKE_CASE ) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = False , snake_case_ = True ): _snake_case : Union[str, Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case : Tuple = hidden_states + (hidden_state,) _snake_case : List[str] = stage_module(_SCREAMING_SNAKE_CASE ) if output_hidden_states: 
_snake_case : str = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE ) class _UpperCAmelCase ( lowerCAmelCase__): __lowercase : Any = RegNetConfig __lowercase : List[str] = "regnet" __lowercase : Optional[int] = "pixel_values" __lowercase : Tuple = True def lowerCamelCase__ ( self , snake_case_ ): if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(_SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCamelCase__ ( self , snake_case_ , snake_case_=False ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case : Dict = value _a : List[str] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _a : Optional[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class _UpperCAmelCase ( lowerCAmelCase__): def __init__( self , snake_case_ ): super().__init__(_SCREAMING_SNAKE_CASE ) _snake_case : Any = config _snake_case : Dict = RegNetEmbeddings(_SCREAMING_SNAKE_CASE ) _snake_case : Dict = RegNetEncoder(_SCREAMING_SNAKE_CASE ) _snake_case : Any = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None ): _snake_case : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case : Dict = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : List[Any] = self.embedder(_SCREAMING_SNAKE_CASE ) _snake_case : Tuple = self.encoder( _SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) _snake_case : int = encoder_outputs[0] _snake_case : int = self.pooler(_SCREAMING_SNAKE_CASE ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """\n RegNet Model with an image classification head 
on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class _UpperCAmelCase ( lowerCAmelCase__): def __init__( self , snake_case_ ): super().__init__(_SCREAMING_SNAKE_CASE ) _snake_case : Dict = config.num_labels _snake_case : List[Any] = RegNetModel(_SCREAMING_SNAKE_CASE ) # classification head _snake_case : Dict = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase__ ( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ): _snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict _snake_case : Optional[Any] = self.regnet(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) _snake_case : Dict = outputs.pooler_output if return_dict else outputs[1] _snake_case : List[Any] = self.classifier(_SCREAMING_SNAKE_CASE ) _snake_case : Any = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case : Union[str, Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case : str = "single_label_classification" else: _snake_case : List[Any] = "multi_label_classification" if self.config.problem_type == "regression": _snake_case : int = MSELoss() if self.num_labels == 1: _snake_case : str = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case : List[str] = 
loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif self.config.problem_type == "single_label_classification": _snake_case : Optional[Any] = CrossEntropyLoss() _snake_case : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case : str = BCEWithLogitsLoss() _snake_case : List[Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not return_dict: _snake_case : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
709
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( _snake_case): __lowercase : Optional[Any] = """openai-gpt""" __lowercase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Dict = n_positions _snake_case : Any = n_embd _snake_case : Any = n_layer _snake_case : Optional[int] = n_head _snake_case : Union[str, Any] = afn _snake_case : Dict = resid_pdrop _snake_case : str = embd_pdrop _snake_case : Union[str, Any] = attn_pdrop _snake_case : str = layer_norm_epsilon _snake_case : Union[str, Any] = initializer_range _snake_case : Any = summary_type _snake_case : List[str] = summary_use_proj _snake_case : Optional[int] = summary_activation _snake_case : Union[str, Any] = summary_first_dropout _snake_case : Optional[int] = summary_proj_to_labels super().__init__(**snake_case_ )
87
0
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
710
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', 
f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), 
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] 
_snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case , _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a ) 
_snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a ) 
model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..." 
) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore _a : str = ''' Human: <<task>> Assistant: ''' _a : Tuple = '''huggingface-tools/default-prompts''' _a : Any = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''} def a__ ( a : List[Any] , a : Optional[Any] , a : Dict="run" ): """simple docstring""" if prompt_or_repo_id is None: _snake_case : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , a ) is not None: return prompt_or_repo_id _snake_case : int = cached_file( a , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(a , "r" , encoding="utf-8" ) as f: return f.read()
711
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
87
0
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase__ ( self ): _snake_case : int = 1 _snake_case : List[str] = 3 _snake_case : str = (32, 32) _snake_case : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : int = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(__a ) 
@property def lowerCamelCase__ ( self ): def extract(*snake_case_ , **snake_case_ ): class _UpperCAmelCase : def __init__( self ): _snake_case : Union[str, Any] = torch.ones([0] ) def lowerCamelCase__ ( self , snake_case_ ): self.pixel_values.to(__a ) return self return Out() return extract def lowerCamelCase__ ( self ): _snake_case : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : str = self.dummy_cond_unet _snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=__a ) _snake_case : str = self.dummy_vae _snake_case : Dict = self.dummy_text_encoder _snake_case : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) _snake_case : Union[str, Any] = 77 _snake_case : Dict = self.dummy_image.to(__a ) _snake_case : List[str] = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _snake_case : Optional[int] = AltDiffusionImgaImgPipeline( unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , ) _snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a ) _snake_case : List[str] = alt_pipe.to(__a ) alt_pipe.set_progress_bar_config(disable=__a ) _snake_case : str = "A painting of a squirrel eating a burger" _snake_case : int = torch.Generator(device=__a ).manual_seed(0 ) _snake_case : Tuple = alt_pipe( [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , ) _snake_case : int = output.images _snake_case : Any = torch.Generator(device=__a ).manual_seed(0 ) _snake_case : Any = alt_pipe( [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__a , return_dict=__a , )[0] _snake_case : int = image[0, -3:, -3:, -1] _snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _snake_case : int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 
0.4148, 0.4193, 0.4666, 0.4499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.dummy_cond_unet _snake_case : Union[str, Any] = PNDMScheduler(skip_prk_steps=__a ) _snake_case : str = self.dummy_vae _snake_case : Tuple = self.dummy_text_encoder _snake_case : str = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) _snake_case : Union[str, Any] = 77 _snake_case : Any = self.dummy_image.to(__a ) # put models in fp16 _snake_case : Optional[int] = unet.half() _snake_case : str = vae.half() _snake_case : str = bert.half() # make sure here that pndm scheduler skips prk _snake_case : Optional[Any] = AltDiffusionImgaImgPipeline( unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , ) _snake_case : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a ) _snake_case : List[Any] = alt_pipe.to(__a ) alt_pipe.set_progress_bar_config(disable=__a ) _snake_case : Dict = "A painting of a squirrel eating a burger" _snake_case : Optional[int] = torch.manual_seed(0 ) _snake_case : List[str] = alt_pipe( [prompt] , generator=__a , num_inference_steps=2 , output_type="np" , image=__a , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCamelCase__ ( self ): _snake_case : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 _snake_case : List[Any] = init_image.resize((7_60, 5_04) ) _snake_case : Optional[Any] = "BAAI/AltDiffusion" _snake_case : Optional[int] = 
AltDiffusionImgaImgPipeline.from_pretrained( __a , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() _snake_case : Optional[Any] = "A fantasy landscape, trending on artstation" _snake_case : Any = torch.manual_seed(0 ) _snake_case : Dict = pipe( prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="np" , ) _snake_case : int = output.images[0] _snake_case : int = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) _snake_case : List[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ): _snake_case : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _snake_case : Optional[int] = init_image.resize((7_68, 5_12) ) _snake_case : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) _snake_case : str = "BAAI/AltDiffusion" _snake_case : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained( __a , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() _snake_case : int = "A fantasy landscape, trending on artstation" _snake_case : Dict = torch.manual_seed(0 ) _snake_case : List[Any] = pipe( prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="np" , ) _snake_case : Dict = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
712
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : Optional[int] = logging.get_logger(__name__) _a : List[str] = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """convnextv2""" def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) _snake_case : Tuple = num_channels _snake_case : Optional[int] = patch_size _snake_case : Tuple = num_stages _snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _snake_case : str = [3, 3, 9, 3] if depths is None else depths _snake_case : int = hidden_act _snake_case : Tuple = initializer_range _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Optional[int] = drop_path_rate _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
87
0
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets _a : Optional[Any] = datasets.logging.get_logger(__name__) _a : List[Any] = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ _a : Optional[Any] = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ _a : Union[str, Any] = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. 
Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ _a : str = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _UpperCAmelCase ( datasets.Metric): def lowerCamelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , ) def lowerCamelCase__ ( self , snake_case_ ): if self.config_name == "default": logger.warning( "Using default 
BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." ) _snake_case : Dict = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: _snake_case : Tuple = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: _snake_case : Dict = self.config_name.upper() else: raise KeyError( F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer _snake_case : str = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) _snake_case : str = score.BleurtScorer(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): _snake_case : Any = self.scorer.score(references=lowerCamelCase_ , candidates=lowerCamelCase_ ) return {"scores": scores}
713
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def a__ ( a : Namespace ): """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _a : int = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _UpperCAmelCase ( _snake_case): @staticmethod def lowerCamelCase__ ( snake_case_ ): _snake_case : Dict = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=snake_case_ ) def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ): _snake_case : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F'Loading model {model_type}' ) _snake_case : Optional[int] = model_type _snake_case : Any = tf_checkpoint _snake_case : Optional[int] = pytorch_dump_output _snake_case : Tuple = config _snake_case : Tuple = finetuning_task_name def lowerCamelCase__ ( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: 
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) if "ckpt" in self._tf_checkpoint.lower(): _snake_case : int = self._tf_checkpoint _snake_case : Optional[Any] = "" else: _snake_case : Optional[int] = self._tf_checkpoint _snake_case : List[str] = "" convert_transfo_xl_checkpoint_to_pytorch( snake_case_ , self._config , self._pytorch_dump_output , snake_case_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
87
0
from math import factorial def a__ ( a : int , a : int ): """simple docstring""" if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(A__ ) // (factorial(A__ ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", f'fifty-two card deck is: {combinations(52, 5)}\n', ) print( """If a class of 40 students must be arranged into groups of""", f'4 for group projects, there are {combinations(40, 4)} ways', """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", f'are {combinations(10, 3)} ways that first, second and', """third place can be awarded.""", )
714
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def a__ ( a : List[str] , a : Any ): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case : Any = flax_key_tuple[:-1] + ("weight",) _snake_case : str = torch.permute(a , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(a ): # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",) _snake_case : Any = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ): """simple docstring""" if "metadata" in layer: _snake_case : Optional[int] = layer.split("metadata" ) _snake_case : Optional[int] = "".join(split_layer[0] )[:-1] _snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: _snake_case : Any = layer.split("kvstore" ) _snake_case : str = "".join(split_layer[0] )[:-1] _snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: _snake_case : List[Any] = layer.split("/" ) _snake_case : Tuple = "/".join(split_layer[:-1] ) _snake_case : int = (split_layer[-1],) if "kvstore/path" in layer: _snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: _snake_case : Tuple = "file" else: _snake_case : Optional[int] = checkpoint_info[layer] return curr_real_layer_name, split_layer, 
content def a__ ( a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Union[str, Any] = rename_keys(a ) _snake_case : int = {} for k, v in current_block.items(): _snake_case : Optional[int] = v _snake_case : Optional[int] = new_current_block torch.save(a , a ) def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ): """simple docstring""" _snake_case : Any = convert_file_size_to_int(a ) _snake_case : Tuple = [] _snake_case : Optional[int] = {} _snake_case : Tuple = 0 _snake_case : Optional[Any] = 0 os.makedirs(a , exist_ok=a ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: _snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] _snake_case : Optional[Any] = flatten_dict(a , sep="/" ) _snake_case : Optional[Any] = {} for layer in checkpoint_info.keys(): _snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict( a , a , a ) if curr_real_layer_name in all_layers: _snake_case : Dict = content else: _snake_case : Tuple = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case : Dict = torch.tensor(a ) _snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a ) _snake_case : Optional[Any] = "/".join(a ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _snake_case : Any = os.path.join( a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case : List[Any] = {} _snake_case : str = 0 _snake_case : List[str] = raw_weights.to(getattr(a , a ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) ) rename_and_save_block(a , a ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(a ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case : str = {} _snake_case : Any = {} for idx, shard in enumerate(a ): _snake_case : Optional[int] = weights_name.replace( ".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d} _snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(a , os.path.join(a , a ) ) _snake_case : Dict = shard for key in shard: _snake_case : int = shard_file # Add the metadata _snake_case : List[Any] = {"total_size": total_size} _snake_case : Any = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f: _snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n" f.write(a ) return metadata, index if __name__ == "__main__": _a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) _a : Optional[int] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def a__ ( ): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) _snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) _snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" ) _snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." _snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids _snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
87
0
"""simple docstring""" _a : Optional[int] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _a : Optional[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _a : Dict = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
715
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase): __lowercase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __lowercase : Optional[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ): _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) 
return inputs_dict class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): _snake_case : Optional[Any] = parent _snake_case : List[Any] = batch_size _snake_case : Optional[int] = seq_length _snake_case : Dict = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[Any] = use_token_type_ids _snake_case : int = use_labels _snake_case : Dict = vocab_size _snake_case : Tuple = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : str = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : List[Any] = num_labels _snake_case : Optional[int] = num_choices _snake_case : Optional[int] = scope _snake_case : Any = embedding_size def lowerCamelCase__ ( self ): _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Optional[Any] = None if self.use_input_mask: _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : List[str] = None if self.use_token_type_ids: _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case : Dict = None _snake_case : Tuple = None _snake_case : str = None if self.use_labels: _snake_case : Union[str, Any] = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = TFMobileBertModel(config=snake_case_ ) _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) _snake_case : Union[str, Any] = [input_ids, input_mask] _snake_case : Optional[Any] = model(snake_case_ ) _snake_case : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[str] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Tuple = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = self.num_labels _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ ) _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Any = self.num_choices _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : 
Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) _snake_case : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _snake_case : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = self.num_labels _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ ) _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ ) _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _snake_case : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Tuple = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def lowerCamelCase__ ( self ): _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) _snake_case : Optional[Any] = ConfigTester(self , 
config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase__ ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" 
) _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : Union[str, Any] = model(snake_case_ )[0] _snake_case : int = [1, 6, 3_05_22] self.assertEqual(output.shape , snake_case_ ) _snake_case : Optional[Any] = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
87
0
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[Any] = logging.get_logger(__name__) _a : Dict = { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json""" ), } class _UpperCAmelCase ( _snake_case): __lowercase : Optional[int] = """xlm-prophetnet""" __lowercase : Union[str, Any] = ["""past_key_values"""] __lowercase : str = { """num_attention_heads""": """num_encoder_attention_heads""", } def __init__( self , snake_case_ = 0.1 , snake_case_ = "gelu" , snake_case_ = 3_05_22 , snake_case_ = 10_24 , snake_case_ = 40_96 , snake_case_ = 12 , snake_case_ = 16 , snake_case_ = 40_96 , snake_case_ = 12 , snake_case_ = 16 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 5_12 , snake_case_ = 0.02 , snake_case_ = True , snake_case_ = True , snake_case_ = 0 , snake_case_ = 2 , snake_case_ = 32 , snake_case_ = 1_28 , snake_case_ = False , snake_case_ = 0.0 , snake_case_ = True , snake_case_ = 0 , snake_case_ = 1 , snake_case_ = 2 , **snake_case_ , ): _snake_case : Union[str, Any] = vocab_size _snake_case : Dict = hidden_size _snake_case : int = encoder_ffn_dim _snake_case : Union[str, Any] = num_encoder_layers _snake_case : Dict = num_encoder_attention_heads _snake_case : Tuple = decoder_ffn_dim _snake_case : Dict = num_decoder_layers _snake_case : Dict = num_decoder_attention_heads _snake_case : int = max_position_embeddings _snake_case : Dict = init_std # Normal(0, this parameter) _snake_case : int = activation_function # parameters for xlmprophetnet _snake_case : Any = ngram _snake_case : Union[str, Any] = num_buckets _snake_case : str = relative_max_distance _snake_case : Any = disable_ngram_loss _snake_case : List[str] = eps # 3 Types of Dropout _snake_case : Optional[Any] = attention_dropout _snake_case : Tuple = activation_dropout _snake_case : Any = dropout 
_snake_case : List[str] = use_cache super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , ) @property def lowerCamelCase__ ( self ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def lowerCamelCase__ ( self , snake_case_ ): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _a : List[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" # using dfs for finding eulerian path traversal def a__ ( a : Any , a : Union[str, Any] , a : Union[str, Any] , a : Tuple=None ): """simple docstring""" _snake_case : List[str] = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: _snake_case , _snake_case : Optional[Any] = True, True _snake_case : Optional[Any] = dfs(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return path def a__ ( a : List[Any] , a : str ): """simple docstring""" _snake_case : str = 0 _snake_case : Union[str, Any] = -1 for i in range(snake_case__ ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 _snake_case : str = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def a__ ( a : Union[str, Any] , a : Any ): """simple docstring""" _snake_case : Optional[int] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] _snake_case , _snake_case : Dict = check_circuit_or_path(snake_case__ , snake_case__ ) if check == 3: print("graph is not Eulerian" ) print("no path" ) return _snake_case : Optional[Any] = 1 if check == 2: _snake_case : int = odd_node print("graph has a Euler path" ) if check == 1: print("graph has a Euler cycle" ) _snake_case : Optional[Any] = dfs(snake_case__ , snake_case__ , snake_case__ ) print(snake_case__ ) def a__ ( ): """simple docstring""" _snake_case : List[str] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} _snake_case : int = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} _snake_case : Optional[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} _snake_case : Dict = {1: [2, 3], 2: [1, 3], 3: [1, 2]} _snake_case : Optional[int] = { 1: [], 2: [] # all degree is zero } _snake_case : Tuple = 10 check_euler(snake_case__ , snake_case__ ) check_euler(snake_case__ , snake_case__ ) check_euler(snake_case__ , snake_case__ ) check_euler(snake_case__ , snake_case__ ) 
check_euler(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
717
"""simple docstring""" def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ): """simple docstring""" _snake_case : Optional[int] = right or len(a ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(a , a , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def a__ ( a : List[str] ): """simple docstring""" return "".join(sorted(a ) ) def a__ ( a : List[str] ): """simple docstring""" return word_by_signature[signature(a )] _a : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _a : Tuple = sorted({word.strip().lower() for word in data.splitlines()}) _a : Optional[int] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _a : Tuple = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
718
"""simple docstring""" from __future__ import annotations class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_ ): _snake_case , _snake_case : Dict = text, pattern _snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self , snake_case_ ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self ): # searches pattern in text and returns index positions _snake_case : List[str] = [] for i in range(self.textLen - self.patLen + 1 ): _snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ ) if mismatch_index == -1: positions.append(snake_case_ ) else: _snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] ) _snake_case : Tuple = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _a : List[Any] = """ABAABA""" _a : str = """AB""" _a : List[Any] = BoyerMooreSearch(text, pattern) _a : Any = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
87
0
"""simple docstring""" from ....utils import logging _a : List[str] = logging.get_logger(__name__) class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__): def __init__( self , snake_case_ , snake_case_=None , snake_case_=20_48 ): _snake_case : Dict = config.__dict__ _snake_case : str = modal_hidden_size if num_labels: _snake_case : List[str] = num_labels
719
"""simple docstring""" from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": _a : Dict = input("""Enter image url: """).strip() print(f'Downloading image from {url} ...') _a : str = BeautifulSoup(requests.get(url).content, """html.parser""") # The image URL is in the content field of the first meta tag with property og:image _a : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""] _a : Dict = requests.get(image_url).content _a : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg' with open(file_name, """wb""") as fp: fp.write(image_data) print(f'Done. Image saved to disk as {file_name}.')
87
0
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _a : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _a : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS) _a : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` _a : str = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") _a : List[str] = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def a__ ( a : Optional[Any] ): """simple docstring""" _snake_case : List[Any] = None # source code of `config_class` _snake_case : List[Any] = inspect.getsource(__a ) _snake_case : Dict = _re_checkpoint.findall(__a ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): _snake_case : List[str] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link _snake_case : Optional[Any] = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _snake_case : str = ckpt_name break return checkpoint def a__ ( ): """simple docstring""" _snake_case : Dict = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue _snake_case : Union[str, Any] = get_checkpoint_from_config_class(__a ) _snake_case : Optional[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__a ) if len(__a ) > 0: _snake_case : Tuple = "\n".join(sorted(__a ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
720
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : Optional[int] = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Dict = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _a : List[str] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Any = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys _a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
721
"""simple docstring""" import argparse import json import subprocess def a__ ( a : Optional[Any] , a : Optional[int] ): """simple docstring""" _snake_case : str = [] _snake_case : Optional[Any] = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" ) _snake_case : Dict = subprocess.run(a , shell=a , stdout=subprocess.PIPE ) _snake_case : Tuple = output.stdout.decode("utf-8" ) _snake_case : List[str] = json.loads(a ) _snake_case : Any = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(a ) # save the result so we can report them on Slack with open("offline_runners.txt" , "w" ) as fp: fp.write(json.dumps(a ) ) if len(a ) > 0: _snake_case : Any = "\n".join([x["name"] for x in offline_runners] ) raise ValueError(f'The following runners are offline:\n{failed}' ) if __name__ == "__main__": def a__ ( a : Optional[int] ): """simple docstring""" return values.split("," ) _a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) _a : List[str] = parser.parse_args() get_runner_status(args.target_runners, args.token)
87
0
"""Fine-tune a token-classification model (NER/POS) on CoNLL-2003-style data."""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data the model is trained and evaluated on."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Parse arguments, build model/datasets, then train/evaluate/predict."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Resolve the task class (e.g. NER, POS) from the local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Drop positions labelled with the CrossEntropy ignore index (padding /
        # sub-word continuations) and map ids back to label strings.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
700
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _snake_case : List[Any] = Vector() def lowerCamelCase__ ( self ): _snake_case : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 2, 3, 4] ) self.assertEqual(len(snake_case_ ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2] ) _snake_case : List[str] = Vector([1, 2, 3, 4, 5] ) _snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Vector([1, 2, 3] ) _snake_case : Any = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase__ ( self ): _snake_case : str = Vector([1, 2, 3] ) _snake_case : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Vector([1, 2, 3] ) _snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product _snake_case : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase__ ( self ): self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 ) def lowerCamelCase__ ( self ): 
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Vector([1, 2, 3] ) _snake_case : Optional[Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] ) _snake_case : Optional[int] = x.copy() self.assertEqual(str(snake_case_ ) , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(snake_case_ ) , "(0,1,0)" ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase__ ( self ): _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _snake_case : List[str] = Vector([1, 2, 3] ) self.assertEqual("(14,32,50)" , str(a * x ) ) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) ) def lowerCamelCase__ ( self ): _snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , 
str(snake_case_ ) ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase__ ( self ): _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) ) def lowerCamelCase__ ( self ): _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) ) def lowerCamelCase__ ( self ): self.assertEqual( "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
87
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _a : List[str] = logging.get_logger(__name__) _a : List[Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 
'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } _a : Union[str, Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def a__ ( a : Tuple , a : List[Any] , a : List[str] , a : Tuple , a : int ): """simple docstring""" for attribute in key.split("." ): _snake_case : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_ ) if weight_type is not None: _snake_case : int = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape else: _snake_case : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _snake_case : List[str] = value elif weight_type == "weight_g": _snake_case : List[Any] = value elif weight_type == "weight_v": _snake_case : Tuple = value elif weight_type == "bias": _snake_case : Optional[Any] = value elif weight_type == "running_mean": _snake_case : List[Any] = value elif weight_type == "running_var": _snake_case : Union[str, Any] = value elif weight_type == "num_batches_tracked": _snake_case : List[Any] = value elif weight_type == "inv_freq": _snake_case : Any = value else: _snake_case : str = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def a__ ( a : List[str] , a : Dict , a : Tuple ): """simple docstring""" _snake_case : Tuple = [] _snake_case : Optional[int] = fairseq_model.state_dict() _snake_case : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _snake_case : Any = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == "group" , ) _snake_case : Dict = True else: for key, mapped_key in MAPPING.items(): _snake_case : Dict = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: _snake_case : Optional[int] = True if "*" in mapped_key: _snake_case : List[Any] = name.split(lowerCamelCase_ )[0].split("." )[-2] _snake_case : List[str] = mapped_key.replace("*" , lowerCamelCase_ ) if "pos_bias_u" in name: _snake_case : Union[str, Any] = None elif "pos_bias_v" in name: _snake_case : str = None elif "weight_g" in name: _snake_case : Dict = '''weight_g''' elif "weight_v" in name: _snake_case : str = '''weight_v''' elif "bias" in name: _snake_case : List[str] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _snake_case : List[str] = '''weight''' elif "running_mean" in name: _snake_case : Any = '''running_mean''' elif "inv_freq" in name: _snake_case : Union[str, Any] = '''inv_freq''' elif "running_var" in name: _snake_case : Optional[int] = '''running_var''' elif "num_batches_tracked" in name: _snake_case : Tuple = '''num_batches_tracked''' else: _snake_case : str = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) continue if not is_used: unused_weights.append(lowerCamelCase_ ) logger.warning(f'Unused weights: {unused_weights}' ) def a__ ( a : List[str] , a : Optional[int] , a : str , a : List[Any] , a : Any ): """simple docstring""" _snake_case : Optional[int] = 
full_name.split("conv_layers." )[-1] _snake_case : Optional[int] = name.split("." ) _snake_case : Optional[int] = int(items[0] ) _snake_case : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _snake_case : Any = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _snake_case : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _snake_case : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _snake_case : Tuple = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowerCamelCase_ ) @torch.no_grad() def a__ ( a : List[Any] , a : Dict , a : Tuple=None , a : Tuple=None , a : List[str]=True ): """simple docstring""" if config_path is not None: _snake_case : Tuple = WavaVecaConformerConfig.from_pretrained(lowerCamelCase_ , hidden_act="swish" ) else: _snake_case : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: _snake_case : Tuple = '''rotary''' if is_finetuned: if dict_path: _snake_case : Union[str, Any] = Dictionary.load(lowerCamelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case : Dict = target_dict.pad_index _snake_case : int = target_dict.bos_index _snake_case : Tuple = target_dict.eos_index _snake_case : Optional[int] = len(target_dict.symbols ) _snake_case : Union[str, Any] = os.path.join(lowerCamelCase_ , "vocab.json" ) if not os.path.isdir(lowerCamelCase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase_ ) ) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) _snake_case : Dict = target_dict.indices # fairseq has the <pad> and <s> switched _snake_case : str = 0 _snake_case : Tuple = 1 with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_ ) _snake_case : List[Any] = WavaVecaCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase_ , ) _snake_case : Optional[int] = True if config.feat_extract_norm == '''layer''' else False _snake_case : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) _snake_case : str = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ ) 
processor.save_pretrained(lowerCamelCase_ ) _snake_case : Union[str, Any] = WavaVecaConformerForCTC(lowerCamelCase_ ) else: _snake_case : str = WavaVecaConformerForPreTraining(lowerCamelCase_ ) if is_finetuned: _snake_case : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: _snake_case : List[str] = argparse.Namespace(task="audio_pretraining" ) _snake_case : Optional[int] = fairseq.tasks.setup_task(lowerCamelCase_ ) _snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase_ ) _snake_case : List[str] = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": _a : List[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _a : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
701
"""simple docstring""" from __future__ import annotations from collections import namedtuple def a__ ( a : float , a : float , a : float ): """simple docstring""" _snake_case : Optional[Any] = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" def a__ ( a : str = 100 ): """simple docstring""" _snake_case : Tuple = n * (n + 1) * (2 * n + 1) / 6 _snake_case : Dict = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'{solution() = }')
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): _a : Dict = True from torch.cuda.amp import autocast _a : Union[str, Any] = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : __lowercase : int = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) __lowercase : Optional[Any] = field( default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __lowercase : List[str] = field( default=snake_case__ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) __lowercase : Optional[int] = field( default=snake_case__ , metadata={"""help""": """Whether to log verbose messages or not."""} , ) __lowercase : Union[str, Any] = field( default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""}) __lowercase : Any = field( default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""}) __lowercase : Union[str, Any] = field( default=0.999_995 , metadata={"""help""": """Decay of gumbel temperature during training."""}) def a__ ( a : Optional[Any] , a : Tuple ): """simple docstring""" logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) _snake_case : Any 
= logging.WARNING if model_args.verbose_logging: _snake_case : str = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): _snake_case : List[str] = logging.INFO logger.setLevel(__SCREAMING_SNAKE_CASE ) @dataclass class _UpperCAmelCase : __lowercase : List[Any] = field( default=snake_case__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}) __lowercase : int = field( default=snake_case__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) __lowercase : Dict = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) __lowercase : Dict = field( default="""validation""" , metadata={ """help""": ( """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) __lowercase : Optional[Any] = field( default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. 
Defaults to 'file'"""} , ) __lowercase : Union[str, Any] = field( default=snake_case__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) __lowercase : Optional[int] = field( default=1 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) __lowercase : Dict = field( default=snake_case__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) __lowercase : Tuple = field( default=2_0.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""}) @dataclass class _UpperCAmelCase : __lowercase : Optional[Any] = 4_2 __lowercase : Optional[int] = 4_2 __lowercase : Tuple = """longest""" __lowercase : Tuple = None __lowercase : List[str] = None def __call__( self , snake_case_ ): # reformat list to dict and set to pytorch format _snake_case : List[Any] = self.feature_extractor.pad( lowercase_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) _snake_case : Optional[int] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] ) _snake_case : Tuple = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula _snake_case : Tuple = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to( torch.long ) _snake_case : Union[str, Any] = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to _snake_case : Optional[Any] = 1 _snake_case : Dict = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices _snake_case : str = _compute_mask_indices( (batch_size, 
mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowercase_ , min_masks=2 , ) return batch class _UpperCAmelCase ( snake_case__): def __init__( self , *snake_case_ , snake_case_=1 , snake_case_=0 , snake_case_=1.0 , **snake_case_ ): super().__init__(*lowercase_ , **lowercase_ ) _snake_case : Union[str, Any] = 0 _snake_case : int = max_gumbel_temp _snake_case : Union[str, Any] = min_gumbel_temp _snake_case : List[str] = gumbel_temp_decay def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): model.train() _snake_case : Dict = self._prepare_inputs(lowercase_ ) if self.use_amp: with autocast(): _snake_case : Dict = self.compute_loss(lowercase_ , lowercase_ ) else: _snake_case : Tuple = self.compute_loss(lowercase_ , lowercase_ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": _snake_case : Any = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": _snake_case : List[Any] = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: _snake_case : int = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowercase_ ).backward() elif self.use_apex: with amp.scale_loss(lowercase_ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowercase_ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a__ ( ): """simple docstring""" _snake_case : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _snake_case : str = parser.parse_args_into_dataclasses() configure_logger(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Downloading and loading a dataset from the hub. 
_snake_case : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" _snake_case : Optional[int] = DatasetDict() _snake_case : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , ) _snake_case : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" _snake_case : Tuple = DatasetDict() _snake_case : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , ) _snake_case : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported _snake_case : List[str] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__SCREAMING_SNAKE_CASE ) def prepare_dataset(a : int ): # check that all files have the correct sampling rate _snake_case : Optional[Any] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays _snake_case : Optional[int] = datasets.map( __SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names ) # filter audio files that are too long _snake_case : Any = vectorized_datasets.filter( lambda a : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(a : int ): return 
feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` _snake_case : Optional[Any] = vectorized_datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 _snake_case : Optional[int] = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) _snake_case : int = WavaVecaForPreTraining(__SCREAMING_SNAKE_CASE ) _snake_case : Dict = DataCollatorForWavaVecaPretraining(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE ) _snake_case : str = WavaVecaPreTrainer( model=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=__SCREAMING_SNAKE_CASE , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
703
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _UpperCAmelCase ( _snake_case): __lowercase : int = """EncodecFeatureExtractor""" __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , snake_case_ , snake_case_ ): super().__init__(snake_case_ , snake_case_ ) _snake_case : Dict = self.feature_extractor _snake_case : Any = False def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ): return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ ) def __call__( self , *snake_case_ , **snake_case_ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*snake_case_ , **snake_case_ ) _snake_case : str = kwargs.pop("audio" , snake_case_ ) _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ ) _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Any = args[0] _snake_case : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if text is not None: _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ ) if audio is not None: _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ ) if audio is None: return inputs elif text is None: return audio_inputs else: _snake_case : str = audio_inputs["input_values"] if "padding_mask" in audio_inputs: _snake_case : List[str] = audio_inputs["padding_mask"] return inputs def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): _snake_case : Tuple = kwargs.pop("audio" , snake_case_ ) _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ ) if len(snake_case_ ) > 0: _snake_case : Tuple = args[0] _snake_case : Dict = args[1:] if audio_values is not None: return self._decode_audio(snake_case_ , padding_mask=snake_case_ ) else: return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ): return self.tokenizer.decode(*snake_case_ , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ): _snake_case : Optional[int] = to_numpy(snake_case_ ) _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape if padding_mask is None: return list(snake_case_ ) _snake_case : Optional[int] = to_numpy(snake_case_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _snake_case : Any = seq_len - padding_mask.shape[-1] _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ ) _snake_case : Any = audio_values.tolist() for i in range(snake_case_ ): _snake_case : Tuple = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _snake_case : Tuple = sliced_audio.reshape(snake_case_ 
, -1 ) return audio_values
87
0
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def a__ ( a : Union[str, Any] , a : List[Any] , a : List[Any] ): """simple docstring""" _snake_case : Any = BertConfig.from_json_file(a ) print(f'Building PyTorch model from configuration: {config}' ) _snake_case : Optional[Any] = BertForPreTraining(a ) # Load weights from tf checkpoint load_tf_weights_in_bert(a , a , a ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , a ) if __name__ == "__main__": _a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _a : Any = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = ["""YolosFeatureExtractor"""] _a : List[Any] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( __lowercase): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = dataset _snake_case : List[str] = process _snake_case : List[Any] = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : List[str] = self.dataset[i] _snake_case : Union[str, Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( __lowercase): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : int = loader _snake_case : Optional[int] = infer _snake_case : Any = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : Optional[int] = None _snake_case : Any = loader_batch_size # Internal bookkeeping _snake_case : Optional[Any] = None _snake_case : Union[str, Any] = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : str = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : Tuple = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : List[str] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and 
isinstance(snake_case_ , snake_case_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Optional[Any] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : int = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : str = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : Tuple = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Any = next(self.iterator ) _snake_case : Optional[int] = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Optional[Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : str = len(snake_case_ ) else: _snake_case : Any = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : int = observed_batch_size # Setting internal index to unwrap the batch _snake_case : Tuple = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( __lowercase): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) _snake_case : Optional[int] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : List[str] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) _snake_case : List[Any] = next(self.subiterator ) return processed class _UpperCAmelCase ( __lowercase): def __iter__( self ): _snake_case : List[str] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : str = False _snake_case : List[Any] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Optional[Any] = self.loader_batch_item() _snake_case : Union[str, Any] = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : Any = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : str = processed else: _snake_case : Union[str, Any] = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : str = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Optional[int] = observed_batch_size _snake_case : Tuple = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : List[str] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Optional[int] = processed _snake_case : List[Any] = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( __lowercase): def __init__( self , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : Union[str, Any] = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( __lowercase): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Optional[int] = keya _snake_case : Dict = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
705
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Optional[int] = dataset _snake_case : str = process _snake_case : int = params def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): _snake_case : Union[str, Any] = self.dataset[i] _snake_case : Optional[Any] = self.process(snake_case_ , **self.params ) return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): _snake_case : Union[str, Any] = loader _snake_case : Tuple = infer _snake_case : List[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _snake_case : int = None _snake_case : int = loader_batch_size # Internal bookkeeping _snake_case : Any = None _snake_case : Dict = None def __len__( self ): return len(self.loader ) def __iter__( self ): _snake_case : int = iter(self.loader ) return self def lowerCamelCase__ ( self ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _snake_case : int = {} for k, element in self._loader_batch_data.items(): if isinstance(snake_case_ , snake_case_ ): # Convert ModelOutput to tuple first _snake_case : Tuple = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ): # Those 
are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _snake_case : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. _snake_case : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _snake_case : int = self._loader_batch_data.__class__(snake_case_ ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _snake_case : Tuple = next(self.iterator ) _snake_case : Any = self.infer(snake_case_ , **self.params ) # We now have a batch of "inferred things". 
if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Optional[int] = list(processed.keys() )[0] _snake_case : List[str] = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Dict = len(snake_case_ ) else: _snake_case : Optional[int] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _snake_case : Union[str, Any] = observed_batch_size # Setting internal index to unwrap the batch _snake_case : str = processed _snake_case : List[Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): super().__init__(snake_case_ , snake_case_ , snake_case_ ) def __iter__( self ): _snake_case : Tuple = iter(self.loader ) _snake_case : List[Any] = None return self def lowerCamelCase__ ( self ): if self.subiterator is None: _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _snake_case : Union[str, Any] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _snake_case : str = self.infer(next(self.iterator ) , **self.params ) _snake_case : Tuple = next(self.subiterator ) return processed class _UpperCAmelCase ( _snake_case): def __iter__( self ): _snake_case : Optional[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _snake_case : Optional[Any] = False _snake_case : Tuple = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : str = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator while not is_last: _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(snake_case_ , torch.Tensor ): _snake_case : Union[str, Any] = processed else: _snake_case : Tuple = list(processed.keys() )[0] _snake_case : Tuple = processed[key] if isinstance(snake_case_ , snake_case_ ): _snake_case : Any = len(snake_case_ ) else: _snake_case : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_snake_case : Dict = observed_batch_size _snake_case : List[Any] = processed _snake_case : List[str] = 0 while self._loader_batch_index < self.loader_batch_size: _snake_case : Union[str, Any] = self.loader_batch_item() _snake_case : int = item.pop("is_last" ) accumulator.append(snake_case_ ) if is_last: return accumulator else: _snake_case : Dict = processed _snake_case : Dict = item.pop("is_last" ) accumulator.append(snake_case_ ) return accumulator class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ ): _snake_case : str = dataset _snake_case : Any = key def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return self.dataset[i][self.key] class _UpperCAmelCase ( _snake_case): def __init__( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = dataset _snake_case : Any = keya _snake_case : int = keya def __len__( self ): return len(self.dataset ) def __getitem__( self , snake_case_ ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
0
"""simple docstring""" from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class _UpperCAmelCase ( _snake_case): __lowercase : Dict = """Salesforce/blip-image-captioning-base""" __lowercase : Optional[Any] = ( """This is a tool that generates a description of an image. It takes an input named `image` which should be the """ """image to caption, and returns a text that contains the description in English.""" ) __lowercase : Dict = """image_captioner""" __lowercase : int = AutoModelForVisionaSeq __lowercase : Any = ["""image"""] __lowercase : int = ["""text"""] def __init__( self , *snake_case_ , **snake_case_ ): requires_backends(self , ["vision"] ) super().__init__(*__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase__ ( self , snake_case_ ): return self.pre_processor(images=__UpperCamelCase , return_tensors="pt" ) def lowerCamelCase__ ( self , snake_case_ ): return self.model.generate(**__UpperCamelCase ) def lowerCamelCase__ ( self , snake_case_ ): return self.pre_processor.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )[0].strip()
706
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
0
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _a : Any = logging.get_logger(__name__) _a : Union[str, Any] = OrderedDict( [ ("""align""", """EfficientNetImageProcessor"""), ("""beit""", """BeitImageProcessor"""), ("""bit""", """BitImageProcessor"""), ("""blip""", """BlipImageProcessor"""), ("""blip-2""", """BlipImageProcessor"""), ("""bridgetower""", """BridgeTowerImageProcessor"""), ("""chinese_clip""", """ChineseCLIPImageProcessor"""), ("""clip""", """CLIPImageProcessor"""), ("""clipseg""", """ViTImageProcessor"""), ("""conditional_detr""", """ConditionalDetrImageProcessor"""), ("""convnext""", """ConvNextImageProcessor"""), ("""convnextv2""", """ConvNextImageProcessor"""), ("""cvt""", """ConvNextImageProcessor"""), ("""data2vec-vision""", """BeitImageProcessor"""), ("""deformable_detr""", """DeformableDetrImageProcessor"""), ("""deit""", """DeiTImageProcessor"""), ("""deta""", """DetaImageProcessor"""), ("""detr""", """DetrImageProcessor"""), ("""dinat""", """ViTImageProcessor"""), ("""donut-swin""", """DonutImageProcessor"""), ("""dpt""", """DPTImageProcessor"""), ("""efficientformer""", """EfficientFormerImageProcessor"""), ("""efficientnet""", """EfficientNetImageProcessor"""), ("""flava""", """FlavaImageProcessor"""), ("""focalnet""", """BitImageProcessor"""), ("""git""", """CLIPImageProcessor"""), ("""glpn""", """GLPNImageProcessor"""), ("""groupvit""", 
"""CLIPImageProcessor"""), ("""imagegpt""", """ImageGPTImageProcessor"""), ("""instructblip""", """BlipImageProcessor"""), ("""layoutlmv2""", """LayoutLMv2ImageProcessor"""), ("""layoutlmv3""", """LayoutLMv3ImageProcessor"""), ("""levit""", """LevitImageProcessor"""), ("""mask2former""", """Mask2FormerImageProcessor"""), ("""maskformer""", """MaskFormerImageProcessor"""), ("""mgp-str""", """ViTImageProcessor"""), ("""mobilenet_v1""", """MobileNetV1ImageProcessor"""), ("""mobilenet_v2""", """MobileNetV2ImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevitv2""", """MobileViTImageProcessor"""), ("""nat""", """ViTImageProcessor"""), ("""oneformer""", """OneFormerImageProcessor"""), ("""owlvit""", """OwlViTImageProcessor"""), ("""perceiver""", """PerceiverImageProcessor"""), ("""pix2struct""", """Pix2StructImageProcessor"""), ("""poolformer""", """PoolFormerImageProcessor"""), ("""regnet""", """ConvNextImageProcessor"""), ("""resnet""", """ConvNextImageProcessor"""), ("""sam""", """SamImageProcessor"""), ("""segformer""", """SegformerImageProcessor"""), ("""swiftformer""", """ViTImageProcessor"""), ("""swin""", """ViTImageProcessor"""), ("""swin2sr""", """Swin2SRImageProcessor"""), ("""swinv2""", """ViTImageProcessor"""), ("""table-transformer""", """DetrImageProcessor"""), ("""timesformer""", """VideoMAEImageProcessor"""), ("""tvlt""", """TvltImageProcessor"""), ("""upernet""", """SegformerImageProcessor"""), ("""van""", """ConvNextImageProcessor"""), ("""videomae""", """VideoMAEImageProcessor"""), ("""vilt""", """ViltImageProcessor"""), ("""vit""", """ViTImageProcessor"""), ("""vit_hybrid""", """ViTHybridImageProcessor"""), ("""vit_mae""", """ViTImageProcessor"""), ("""vit_msn""", """ViTImageProcessor"""), ("""xclip""", """CLIPImageProcessor"""), ("""yolos""", """YolosImageProcessor"""), ] ) _a : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def a__ 
( a : str ): """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _snake_case : Any = model_type_to_module_name(a ) _snake_case : Tuple = importlib.import_module(f'.{module_name}' , "transformers.models" ) try: return getattr(a , a ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(a , "__name__" , a ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _snake_case : List[str] = importlib.import_module("transformers" ) if hasattr(a , a ): return getattr(a , a ) return None def a__ ( a : Union[str, os.PathLike] , a : Optional[Union[str, os.PathLike]] = None , a : bool = False , a : bool = False , a : Optional[Dict[str, str]] = None , a : Optional[Union[bool, str]] = None , a : Optional[str] = None , a : bool = False , **a : Any , ): """simple docstring""" _snake_case : Optional[Any] = get_file_from_repo( a , a , cache_dir=a , force_download=a , resume_download=a , proxies=a , use_auth_token=a , revision=a , local_files_only=a , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead." ) return {} with open(a , encoding="utf-8" ) as reader: return json.load(a ) class _UpperCAmelCase : def __init__( self ): raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." 
) @classmethod @replace_list_option_in_docstrings(snake_case_ ) def lowerCamelCase__ ( cls , snake_case_ , **snake_case_ ): _snake_case : str = kwargs.pop("config" , snake_case_ ) _snake_case : int = kwargs.pop("trust_remote_code" , snake_case_ ) _snake_case : List[str] = True _snake_case : Union[str, Any] = ImageProcessingMixin.get_image_processor_dict(snake_case_ , **snake_case_ ) _snake_case : int = config_dict.get("image_processor_type" , snake_case_ ) _snake_case : Optional[Any] = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): _snake_case : str = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _snake_case : Union[str, Any] = config_dict.pop("feature_extractor_type" , snake_case_ ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. Loading" " based on pattern matching with the model's feature extractor configuration." ) _snake_case : int = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): _snake_case : int = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] _snake_case : int = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. 
if image_processor_class is None and image_processor_auto_map is None: if not isinstance(snake_case_ , snake_case_ ): _snake_case : Any = AutoConfig.from_pretrained(snake_case_ , **snake_case_ ) # It could be in `config.image_processor_type`` _snake_case : int = getattr(snake_case_ , "image_processor_type" , snake_case_ ) if hasattr(snake_case_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map: _snake_case : Optional[Any] = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: _snake_case : Optional[Any] = image_processor_class_from_name(snake_case_ ) _snake_case : Optional[Any] = image_processor_auto_map is not None _snake_case : Dict = image_processor_class is not None or type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING _snake_case : Dict = resolve_trust_remote_code( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if has_remote_code and trust_remote_code: _snake_case : Union[str, Any] = get_class_from_dynamic_module( snake_case_ , snake_case_ , **snake_case_ ) _snake_case : Optional[int] = kwargs.pop("code_revision" , snake_case_ ) if os.path.isdir(snake_case_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(snake_case_ , **snake_case_ ) elif image_processor_class is not None: return image_processor_class.from_dict(snake_case_ , **snake_case_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING: _snake_case : List[Any] = IMAGE_PROCESSOR_MAPPING[type(snake_case_ )] return image_processor_class.from_dict(snake_case_ , **snake_case_ ) raise ValueError( F'Unrecognized image processor in {pretrained_model_name_or_path}. 
Should have a ' F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' ) @staticmethod def lowerCamelCase__ ( snake_case_ , snake_case_ ): IMAGE_PROCESSOR_MAPPING.register(snake_case_ , snake_case_ )
707
"""simple docstring""" from __future__ import annotations import requests _a : List[str] = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ): """simple docstring""" _snake_case : Any = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ): _snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(a ) _snake_case : int = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , ) if response.status_code == 429: raise requests.HTTPError _snake_case : Optional[Any] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(a )} _snake_case : Tuple = {} for id_ in range(a ): _snake_case : List[str] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
0
"""simple docstring""" import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _UpperCAmelCase ( __A , __A): @register_to_config def __init__( self , *, snake_case_ = 4 , snake_case_ = 7_68 , snake_case_ , snake_case_ , ): super().__init__() _snake_case : List[str] = nn.Parameter(torch.zeros(UpperCamelCase__ ) ) # parameters for additional clip time embeddings _snake_case : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) _snake_case : str = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) # parameters for encoder hidden states _snake_case : str = clip_extra_context_tokens _snake_case : str = nn.Linear( UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim ) _snake_case : Tuple = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) _snake_case : Union[str, Any] = nn.LayerNorm(UpperCamelCase__ ) def lowerCamelCase__ ( self , *, snake_case_ , snake_case_ , snake_case_ , snake_case_ ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _snake_case : Union[str, Any] = image_embeddings.shape[0] _snake_case : List[str] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _snake_case : Union[str, Any] = classifier_free_guidance_embeddings.expand( UpperCamelCase__ , -1 ) _snake_case : Union[str, Any] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _snake_case : Union[str, Any] = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... 
_snake_case : List[Any] = self.embedding_proj(UpperCamelCase__ ) _snake_case : Optional[Any] = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ ) _snake_case : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _snake_case : Optional[Any] = self.clip_extra_context_tokens_proj(UpperCamelCase__ ) _snake_case : int = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens ) _snake_case : Any = clip_extra_context_tokens.permute(0 , 2 , 1 ) _snake_case : Union[str, Any] = self.encoder_hidden_states_proj(UpperCamelCase__ ) _snake_case : Optional[Any] = self.text_encoder_hidden_states_norm(UpperCamelCase__ ) _snake_case : Any = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
708
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def a__ ( a : float , a : float , a : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(a ), magnitude * sin(a )] return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )] def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ): """simple docstring""" _snake_case : NDArray[floataa] = cross(a , a ) _snake_case : float = sum(a ) return abs(a ) < eps if __name__ == "__main__": # Test to check if it works _a : Tuple = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg _a : List[Any] = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) _a : List[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]]) _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
87
0
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase : def __init__( self , snake_case_ , snake_case_=3 , snake_case_=32 , snake_case_=3 , snake_case_=10 , snake_case_=[10, 20, 30, 40] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , ): _snake_case : Union[str, Any] = parent _snake_case : Tuple = batch_size _snake_case : Dict = image_size _snake_case : List[Any] = num_channels _snake_case : List[str] = embeddings_size _snake_case : Optional[Any] = hidden_sizes _snake_case : Tuple = depths _snake_case : int = is_training _snake_case : Optional[int] = use_labels _snake_case : Any = hidden_act _snake_case : Tuple = num_labels _snake_case : Union[str, Any] = scope _snake_case : Any = len(lowerCamelCase_ ) def lowerCamelCase__ ( self ): _snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case : int = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size] , self.num_labels ) _snake_case : int = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFResNetModel(config=lowerCamelCase_ ) _snake_case : Tuple = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : List[Any] = self.num_labels _snake_case : List[Any] = TFResNetForImageClassification(lowerCamelCase_ ) _snake_case : List[str] = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self ): _snake_case : List[Any] = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case : str = config_and_inputs _snake_case : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _UpperCAmelCase ( a__ , a__ , unittest.TestCase): __lowercase : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __lowercase : Any = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) __lowercase : int = False __lowercase : List[str] = False __lowercase : str = False __lowercase : Tuple = False __lowercase : Union[str, Any] = False def lowerCamelCase__ ( self ): _snake_case : List[Any] = TFResNetModelTester(self ) _snake_case : Any = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self ): return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : str = model_class(lowerCamelCase_ ) _snake_case : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case : List[str] = [*signature.parameters.keys()] _snake_case : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase__ ( self ): _snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase__ ( self ): def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): _snake_case : Dict = model_class(lowerCamelCase_ ) _snake_case : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) _snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case : str = 
self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Optional[int] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case : List[Any] = layer_type _snake_case : Optional[int] = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case : Any = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase__ ( self ): _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Dict = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def a__ ( ): """simple docstring""" _snake_case : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _UpperCAmelCase ( unittest.TestCase): @cached_property def lowerCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self ): _snake_case : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case : str = self.default_image_processor _snake_case : List[Any] = prepare_img() _snake_case : Union[str, Any] = image_processor(images=lowerCamelCase_ , return_tensors="tf" ) # forward pass _snake_case : Tuple = model(**lowerCamelCase_ ) # verify the logits _snake_case : Union[str, Any] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _snake_case : str = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , 
atol=1E-4 ) )
709
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class _UpperCAmelCase ( _snake_case): __lowercase : Optional[Any] = """openai-gpt""" __lowercase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ): _snake_case : Tuple = vocab_size _snake_case : Dict = n_positions _snake_case : Any = n_embd _snake_case : Any = n_layer _snake_case : Optional[int] = n_head _snake_case : Union[str, Any] = afn _snake_case : Dict = resid_pdrop _snake_case : str = embd_pdrop _snake_case : Union[str, Any] = attn_pdrop _snake_case : str = layer_norm_epsilon _snake_case : Union[str, Any] = initializer_range _snake_case : Any = summary_type _snake_case : List[str] = summary_use_proj _snake_case : Optional[int] = summary_activation _snake_case : Union[str, Any] = summary_first_dropout _snake_case : Optional[int] = summary_proj_to_labels super().__init__(**snake_case_ )
87
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class _UpperCAmelCase : def __init__( self , snake_case_ , ): _snake_case : Dict = parent _snake_case : int = 13 _snake_case : int = 7 _snake_case : str = True _snake_case : Any = True _snake_case : Optional[int] = True _snake_case : Any = 99 _snake_case : Any = 32 _snake_case : Optional[int] = 2 _snake_case : Any = 4 _snake_case : str = 37 _snake_case : List[Any] = "gelu" _snake_case : Optional[int] = 0.1 _snake_case : Union[str, Any] = 0.1 _snake_case : Tuple = 5_12 _snake_case : Optional[int] = 16 _snake_case : Tuple = 2 _snake_case : List[Any] = 0.02 _snake_case : str = 3 _snake_case : str = 4 _snake_case : Any = None def lowerCamelCase__ ( self ): _snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : Tuple = None if self.use_input_mask: _snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : Dict = None _snake_case : List[Any] = None _snake_case : List[Any] = None if self.use_labels: _snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case : str = ids_tensor([self.batch_size] , self.num_choices ) _snake_case : Union[str, Any] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : int = self.prepare_config_and_inputs() _snake_case : str = True _snake_case : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : Union[str, Any] = TFEsmModel(config=UpperCamelCase__ ) _snake_case : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} _snake_case : Optional[int] = model(UpperCamelCase__ ) _snake_case : Dict = [input_ids, input_mask] _snake_case : Union[str, Any] = model(UpperCamelCase__ ) _snake_case : Any = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ): _snake_case : List[str] = True _snake_case : Union[str, Any] = TFEsmModel(config=UpperCamelCase__ ) _snake_case : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, 
"encoder_attention_mask": encoder_attention_mask, } _snake_case : List[Any] = model(UpperCamelCase__ ) _snake_case : int = [input_ids, input_mask] _snake_case : int = model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ) # Also check the case where encoder outputs are not passed _snake_case : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = TFEsmForMaskedLM(config=UpperCamelCase__ ) _snake_case : Any = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _snake_case : int = self.num_labels _snake_case : str = TFEsmForTokenClassification(config=UpperCamelCase__ ) _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask} _snake_case : List[str] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self ): _snake_case : Any = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : str = config_and_inputs _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase): __lowercase : Tuple = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __lowercase : List[str] = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, 
'''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __lowercase : Tuple = False __lowercase : Optional[int] = False def lowerCamelCase__ ( self ): _snake_case : Dict = TFEsmModelTester(self ) _snake_case : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ ) def lowerCamelCase__ ( self ): _snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase__ ( self ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Tuple = TFEsmModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip("Protein models do not support embedding resizing." ) def lowerCamelCase__ ( self ): pass @unittest.skip("Protein models do not support embedding resizing." 
) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case : str = model_class(UpperCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer _snake_case : Dict = model.get_bias() assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) for k, v in name.items(): assert isinstance(UpperCamelCase__ , tf.Variable ) else: _snake_case : int = model.get_output_embeddings() assert x is None _snake_case : Optional[int] = model.get_bias() assert name is None @require_tf class _UpperCAmelCase ( unittest.TestCase): @slow def lowerCamelCase__ ( self ): _snake_case : Tuple = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) _snake_case : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _snake_case : List[str] = model(UpperCamelCase__ )[0] _snake_case : Optional[int] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ ) # compare the actual values for a slice. _snake_case : Optional[Any] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def lowerCamelCase__ ( self ): _snake_case : Optional[int] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) _snake_case : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _snake_case : Dict = model(UpperCamelCase__ )[0] # compare the actual values for a slice. 
_snake_case : List[str] = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
710
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _a : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _a : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', 
f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), 
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def a__ ( a : List[str] , a : int , a : int ): """simple docstring""" _snake_case : Union[str, Any] = state_dict.pop(a ) _snake_case : Union[str, Any] = val def a__ ( a : Tuple ): """simple docstring""" _snake_case : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _snake_case : Dict = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _snake_case : Tuple = value else: _snake_case : Dict = value return new_state_dict def a__ ( a : int ): """simple docstring""" _snake_case : Any = "" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : int = in_proj_weight[:256, :] _snake_case : List[str] = in_proj_bias[:256] _snake_case : Optional[Any] = in_proj_weight[256:512, :] _snake_case : List[str] = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] 
# NOTE(review): the statements below are the tail of a `read_in_q_k_v(...)`-style routine
# whose `def` line lies before this excerpt; they split fused attention in_proj weights
# into query/key/value slices of 256 channels each. Throughout this chunk every result is
# bound to a throwaway local `_snake_case` but then read back under its original name
# (`in_proj_weight`, `in_proj_bias_cross_attn`, `state_dict`, `scale`, ...), which would
# raise NameError at runtime — this looks like mechanical name-mangling of the upstream
# Table Transformer conversion script; verify against that script before relying on it.
_snake_case : Dict = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _snake_case : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _snake_case : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _snake_case : Union[str, Any] = in_proj_weight[:256, :] _snake_case : Tuple = in_proj_bias[:256] _snake_case : int = in_proj_weight[256:512, :] _snake_case : int = in_proj_bias[256:512] _snake_case : Dict = in_proj_weight[-256:, :] _snake_case : str = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _snake_case : Dict = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _snake_case : Optional[int] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _snake_case : Dict = in_proj_weight_cross_attn[:256, :] _snake_case : Any = in_proj_bias_cross_attn[:256] _snake_case : Union[str, Any] = in_proj_weight_cross_attn[256:512, :] _snake_case : Optional[int] = in_proj_bias_cross_attn[256:512] _snake_case : Any = in_proj_weight_cross_attn[-256:, :] _snake_case : str = in_proj_bias_cross_attn[-256:] def a__ ( a : str , a : int ): """simple docstring""" _snake_case , _snake_case : List[str] = image.size _snake_case : Dict = max(a , a ) _snake_case : Union[str, Any] = 800 if "detection" in checkpoint_url else 1_000 _snake_case : Any = target_max_size / current_max_size _snake_case : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def a__ ( a : str ): """simple docstring""" _snake_case : str = F.to_tensor(a )
# The two helpers above: the first resizes a PIL image so its longer side becomes 800
# (detection checkpoint) or 1000 (structure checkpoint); the second converts to a tensor
# and (continuing below) normalizes with the ImageNet mean/std. NOTE(review): both read
# free variables (`image`, `checkpoint_url`, `scale`, ...) that their mangled parameter
# list `a` never binds — same name-mangling symptom as above.
_snake_case : Union[str, Any] = F.normalize(a , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ): """simple docstring""" logger.info("Converting model..." ) # load original state dict _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" ) # rename keys for src, dest in rename_keys: rename_key(a , a , a ) _snake_case : Union[str, Any] = rename_backbone_keys(a ) # query, key and value matrices need special treatment read_in_q_k_v(a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _snake_case : int = "model." for key in state_dict.copy().keys(): if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _snake_case : Optional[int] = state_dict.pop(a ) _snake_case : Any = val # create HuggingFace model and load state dict _snake_case : Tuple = TableTransformerConfig( backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: _snake_case : Any = 15 _snake_case : int = 2 _snake_case : Optional[Any] = {0: "table", 1: "table rotated"} _snake_case : Union[str, Any] = idalabel _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} else: _snake_case : Any = 125 _snake_case : Union[str, Any] = 6 _snake_case : List[str] = { 0: "table", 1: "table column", 2: "table row", 3: "table column header", 4: "table projected row header", 5: "table spanning cell", } _snake_case : Any = idalabel _snake_case : Optional[int] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = DetrImageProcessor( format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 ) _snake_case : str = TableTransformerForObjectDetection(a )
# Conversion entry point continues: loads the converted state dict into the HF model,
# runs one example image through it, and asserts the first 3x3 of logits / pred_boxes
# against hard-coded expected tensors (different fixtures and shapes per checkpoint
# flavor) before optionally saving to disk and/or pushing to the hub.
model.load_state_dict(a ) model.eval() # verify our conversion _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png" _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a ) _snake_case : Dict = Image.open(a ).convert("RGB" ) _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 ) _snake_case : str = model(a ) if "detection" in checkpoint_url: _snake_case : int = (1, 15, 3) _snake_case : List[str] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: _snake_case : Union[str, Any] = (1, 125, 7) _snake_case : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) image_processor.save_pretrained(a ) if push_to_hub: # Push model to HF hub logger.info("Pushing model to the hub..."
# Tail of the script: picks the canonical hub repo id for the checkpoint flavor, then
# the CLI wires --checkpoint_url / --pytorch_dump_folder_path / --push_to_hub into the
# conversion entry point when run as a script.
) _snake_case : int = ( "microsoft/table-transformer-detection" if "detection" in checkpoint_url else "microsoft/table-transformer-structure-recognition" ) model.push_to_hub(a ) image_processor.push_to_hub(a ) if __name__ == "__main__": _a : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _a : Any = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
87
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def lowerCamelCase__ ( *snake_case_ , **snake_case_ ): pass @is_pipeline_test @require_vision @require_torch class _UpperCAmelCase ( unittest.TestCase): __lowercase : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _snake_case : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) _snake_case : int = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def lowerCamelCase__ ( self , snake_case_ , snake_case_ ): _snake_case : Optional[int] = object_detector(examples[0] , threshold=0.0 ) _snake_case : Tuple = len(lowerCAmelCase_ ) self.assertGreater(lowerCAmelCase_ , 0 ) self.assertEqual( lowerCAmelCase_ , [ { "score": ANY(lowerCAmelCase_ ), "label": ANY(lowerCAmelCase_ ), "box": {"xmin": ANY(lowerCAmelCase_ ), "ymin": ANY(lowerCAmelCase_ ), "xmax": ANY(lowerCAmelCase_ ), "ymax": ANY(lowerCAmelCase_ )}, } for i in range(lowerCAmelCase_ ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def lowerCamelCase__ ( self ): pass @require_torch def lowerCamelCase__ ( self ): _snake_case : Tuple = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) _snake_case : Any = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , 
decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, ] , ) _snake_case : List[Any] = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}}, {"score": 
0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}}, ] ] , ) @require_torch @slow def lowerCamelCase__ ( self ): _snake_case : Any = pipeline("zero-shot-object-detection" ) _snake_case : Tuple = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ] , ) _snake_case : int = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2537, "label": "cat", 
"box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def lowerCamelCase__ ( self ): pass @require_torch @slow def lowerCamelCase__ ( self ): _snake_case : Dict = 0.2 _snake_case : List[Any] = pipeline("zero-shot-object-detection" ) _snake_case : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=lowerCAmelCase_ , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}}, ] , ) @require_torch @slow def lowerCamelCase__ ( self ): _snake_case : List[str] = 2 _snake_case : Union[str, Any] = pipeline("zero-shot-object-detection" ) _snake_case : List[Any] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=lowerCAmelCase_ , ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}}, ] , )
711
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
87
0
# Evaluate a causal language model's loss/perplexity on a streamed dataset.
# NOTE(review): the original chunk bound every local to a throwaway `_snake_case`
# while later statements read the real names (`buffer`, `buffer_len`,
# `tokenized_inputs`, `ConstantLengthDataset`, `create_dataloader`, `evaluate`, ...),
# which made the module unrunnable (NameError / SyntaxError). The names the code
# actually reads are restored below; behavior otherwise follows the visible logic.
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs a streamed text dataset into fixed-length token tensors.

    Examples are buffered until roughly ``seq_length * chars_per_token * num_of_sequences``
    characters are collected, tokenized in one batch, concatenated with the BOS token as
    a separator, and yielded as full ``seq_length`` chunks (trailing partial chunks are
    dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS is used as the document-separator token between concatenated examples.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Character budget per tokenization round (heuristic chars-per-token estimate).
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    # Each streamed record is assumed to carry its text under "content"
                    # (codeparrot-style schema) — confirm against the dataset used.
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            # The original truncation flag was mangled; False matches the packing
            # intent (we chunk manually below) — TODO confirm against upstream.
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Build the evaluation DataLoader over the streamed 'train' split of args.dataset_name."""
    ds_kwargs = {"streaming": True}
    data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    dataset = ConstantLengthDataset(tokenizer, data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Return (mean loss, perplexity) of `model` over `eval_dataloader`.

    Gathers per-batch losses across processes via the accelerator; stops early after
    args.max_eval_steps batches when that limit is positive.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat the scalar loss so gather() concatenates one value per sample.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
712
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _a : Optional[int] = logging.get_logger(__name__) _a : List[str] = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class _UpperCAmelCase ( _snake_case , _snake_case): __lowercase : List[Any] = """convnextv2""" def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=4 , snake_case_=None , snake_case_=None , snake_case_="gelu" , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.0 , snake_case_=2_24 , snake_case_=None , snake_case_=None , **snake_case_ , ): super().__init__(**snake_case_ ) _snake_case : Tuple = num_channels _snake_case : Optional[int] = patch_size _snake_case : Tuple = num_stages _snake_case : int = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes _snake_case : str = [3, 3, 9, 3] if depths is None else depths _snake_case : int = hidden_act _snake_case : Tuple = initializer_range _snake_case : Union[str, Any] = layer_norm_eps _snake_case : Optional[int] = drop_path_rate _snake_case : Union[str, Any] = image_size _snake_case : List[Any] = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices( out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
87
0