Dataset schema:

| column | dtype | values |
| --- | --- | --- |
| `code` | string | lengths 87 – 55.2k |
| `code_codestyle` | int64 | 0 – 349 |
| `style_context` | string | lengths 135 – 49.1k |
| `style_context_codestyle` | int64 | 0 – 349 |
| `label` | int64 | 0 – 1 |
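Each row below pairs a `code` sample with a `style_context` sample, tags each with a style class (`code_codestyle`, `style_context_codestyle`), and carries a binary `label`. A minimal sketch of loading and inspecting one row with the `datasets` library, assuming the data is hosted as a standard Hub dataset; the repository id `user/code-style-pairs` is a placeholder, since this preview does not name the repository:

```python
from datasets import load_dataset

# Placeholder repo id -- the preview does not name the actual dataset repository.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# The two integer style classes and the binary label for this pair
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# First characters of the (single-string) Python source sample
print(row["code"][:120])
```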
Row 1

`code`:

```python
'''simple docstring'''

def __lowerCamelCase ( _lowercase = 4_0_0_0_0_0_0 ) -> int:
    UpperCAmelCase : Optional[Any] = [0, 1]
    UpperCAmelCase : int = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    UpperCAmelCase : str = 0
    for j in range(len(_lowercase ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total

if __name__ == "__main__":
    print(F'''{solution() = }''')
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / """utils"""))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402

a : List[str] = get_tests_dir("""fixtures""")

class UpperCamelCase_ ( unittest.TestCase ):
    def _lowercase( self ) -> int:
        # A mock response for an HTTP head request to emulate server down
        UpperCAmelCase : Tuple = mock.Mock()
        UpperCAmelCase : List[str] = 500
        UpperCAmelCase : Any = {}
        UpperCAmelCase : List[str] = HTTPError
        UpperCAmelCase : str = {}

        # Download this model to make sure it's in the cache.
        UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
            UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _lowercase( self ) -> Any:
        # This test is for deprecated behavior and can be removed in v5
        UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )

    def _lowercase( self ) -> Union[str, Any]:
        with self.assertRaises(A ):
            # config is in subfolder, the following should not work without specifying the subfolder
            UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )

        UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )

        self.assertIsNotNone(A )

@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
    @classmethod
    def _lowercase( cls ) -> Dict:
        UpperCAmelCase : Tuple = TOKEN
        HfFolder.save_token(A )

    @classmethod
    def _lowercase( cls ) -> List[str]:
        try:
            delete_repo(token=cls._token , repo_id="""test-image-processor""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
        except HTTPError:
            pass

    def _lowercase( self ) -> Optional[int]:
        UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )

        UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A , getattr(A , A ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )

        UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A , getattr(A , A ) )

    def _lowercase( self ) -> List[str]:
        UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )

        UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A , getattr(A , A ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )

        UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(A , getattr(A , A ) )

    def _lowercase( self ) -> Optional[int]:
        CustomImageProcessor.register_for_auto_class()
        UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )

        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )

        UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
```

`style_context_codestyle`: 265

`label`: 1
Row 2

`code`:

```python
'''simple docstring'''

from __future__ import annotations

from typing import TypedDict

class UpperCamelCase_ ( __magic_name__ ):
    lowercase = 42
    lowercase = 42

def __lowerCamelCase ( _lowercase ) -> list[str]:
    if not isinstance(_lowercase , _lowercase ):
        raise TypeError("""The parameter s type must be str.""" )

    return [s[i:] + s[:i] for i in range(len(_lowercase ) )]

def __lowerCamelCase ( _lowercase ) -> BWTTransformDict:
    if not isinstance(_lowercase , _lowercase ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )

    UpperCAmelCase : str = all_rotations(_lowercase )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    UpperCAmelCase : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(_lowercase ),
    }
    return response

def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
    if not isinstance(_lowercase , _lowercase ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        UpperCAmelCase : Any = int(_lowercase )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(_lowercase ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )

    UpperCAmelCase : List[Any] = [""""""] * len(_lowercase )
    for _ in range(len(_lowercase ) ):
        for i in range(len(_lowercase ) ):
            UpperCAmelCase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]

if __name__ == "__main__":
    a : Tuple = """Provide a string that I will generate its BWT transform: """
    a : int = input(entry_msg).strip()
    a : Optional[int] = bwt_transform(s)
    print(
        F'''Burrows Wheeler transform for string \'{s}\' results '''
        F'''in \'{result['bwt_string']}\'''' )
    a : List[str] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
        F'''we get original string \'{original_string}\'''' )
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce

def __lowerCamelCase ( _lowercase ) -> Tuple:
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )

def __lowerCamelCase ( _lowercase ) -> List[Any]:
    UpperCAmelCase : Any = create_tensor(_lowercase )
    UpperCAmelCase : Union[str, Any] = gather(_lowercase )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )

def __lowerCamelCase ( _lowercase ) -> Optional[int]:
    UpperCAmelCase : Any = [state.process_index]
    UpperCAmelCase : Union[str, Any] = gather_object(_lowercase )
    assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''

def __lowerCamelCase ( _lowercase ) -> List[Any]:
    UpperCAmelCase : Optional[int] = create_tensor(_lowercase )
    UpperCAmelCase : List[str] = broadcast(_lowercase )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )

def __lowerCamelCase ( _lowercase ) -> Tuple:
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device )
    UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]

def __lowerCamelCase ( _lowercase ) -> Dict:
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    UpperCAmelCase : Optional[Any] = create_tensor(_lowercase )
    UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" )
    UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''

def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    UpperCAmelCase : Tuple = create_tensor(_lowercase )
    UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" )
    UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''

def __lowerCamelCase ( _lowercase ) -> Optional[int]:
    # For xla_spawn (TPUs)
    main()

def __lowerCamelCase ( ) -> int:
    UpperCAmelCase : List[Any] = PartialState()
    state.print(F'''State: {state}''' )

    state.print("""testing gather""" )
    test_gather(_lowercase )
    state.print("""testing gather_object""" )
    test_gather_object(_lowercase )
    state.print("""testing broadcast""" )
    test_broadcast(_lowercase )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(_lowercase )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(_lowercase )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(_lowercase )

if __name__ == "__main__":
    main()
```

`style_context_codestyle`: 265

`label`: 1
Row 3

`code`:

```python
'''simple docstring'''

def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
    if len(_lowercase ) != len(_lowercase ):
        raise ValueError("""String lengths must match!""" )

    UpperCAmelCase : Any = 0
    for chara, chara in zip(_lowercase , _lowercase ):
        if chara != chara:
            count += 1

    return count

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

a : List[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name

class UpperCamelCase_ ( __magic_name__ ):
    def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
        super().__init__()
        self.register_modules(
            vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )

    def _lowercase( self , A = "auto" ) -> List[Any]:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(A )

    def _lowercase( self ) -> Dict:
        self.enable_attention_slicing(A )

    @torch.no_grad()
    def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
        if isinstance(A , A ):
            UpperCAmelCase : List[str] = 1
        elif isinstance(A , A ):
            UpperCAmelCase : Dict = len(A )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(A )}.''' )

        # get prompt text embeddings
        UpperCAmelCase : List[str] = self.tokenizer(
            A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        UpperCAmelCase : List[Any] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
        UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
        UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCAmelCase : Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCAmelCase : List[str]
            if negative_prompt is None:
                UpperCAmelCase : Any = [""""""]
            elif type(A ) is not type(A ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
                    f''' {type(A )}.''' )
            elif isinstance(A , A ):
                UpperCAmelCase : Optional[int] = [negative_prompt]
            elif batch_size != len(A ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                UpperCAmelCase : Any = negative_prompt

            UpperCAmelCase : Dict = text_input_ids.shape[-1]
            UpperCAmelCase : List[Any] = self.tokenizer(
                A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
            UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCAmelCase : int = uncond_embeddings.shape[1]
            UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
            UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        UpperCAmelCase : str = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                UpperCAmelCase : Dict = torch.randn(
                    A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
                UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
                    self.device )
            else:
                UpperCAmelCase : int = torch.randn(
                    A , generator=A , device=self.device , dtype=A )
                UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
            UpperCAmelCase : Tuple = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
        UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
        UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
        UpperCAmelCase : List[str] = 0 if dy < 0 else dy
        UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
        UpperCAmelCase : List[Any] = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(A )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCAmelCase : Optional[Any] = {}
        if accepts_eta:
            UpperCAmelCase : List[str] = eta

        for i, t in enumerate(self.progress_bar(A ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )

            # predict the noise residual
            UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample

            # perform guidance
            if do_classifier_free_guidance:
                UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
                UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(A , A , A )

        UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
        UpperCAmelCase : Tuple = self.vae.decode(A ).sample

        UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
                self.device )
            UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
                images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            UpperCAmelCase : Any = None

        if output_type == "pil":
            UpperCAmelCase : int = self.numpy_to_pil(A )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
```

`style_context_codestyle`: 265

`label`: 1
Row 4

`code`:

```python
'''simple docstring'''

import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPTaTokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )

@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    def _lowercase( self ) -> int:
        UpperCAmelCase : int = tempfile.mkdtemp()

        UpperCAmelCase : Any = BlipImageProcessor()
        UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        UpperCAmelCase : Optional[Any] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )

        UpperCAmelCase : Any = InstructBlipProcessor(A , A , A )

        processor.save_pretrained(self.tmpdirname )

    def _lowercase( self , **A ) -> Optional[int]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A ).tokenizer

    def _lowercase( self , **A ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A ).image_processor

    def _lowercase( self , **A ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **A ).qformer_tokenizer

    def _lowercase( self ) -> List[str]:
        shutil.rmtree(self.tmpdirname )

    def _lowercase( self ) -> List[str]:
        UpperCAmelCase : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase : Any = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _lowercase( self ) -> int:
        UpperCAmelCase : Any = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )

        UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=A , padding_value=1.0 )

        UpperCAmelCase : Union[str, Any] = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A )
        self.assertIsInstance(processor.qformer_tokenizer , A )

    def _lowercase( self ) -> Optional[int]:
        UpperCAmelCase : Optional[Any] = self.get_image_processor()
        UpperCAmelCase : List[Any] = self.get_tokenizer()
        UpperCAmelCase : int = self.get_qformer_tokenizer()

        UpperCAmelCase : List[str] = InstructBlipProcessor(
            tokenizer=A , image_processor=A , qformer_tokenizer=A )

        UpperCAmelCase : int = self.prepare_image_inputs()

        UpperCAmelCase : Any = image_processor(A , return_tensors="""np""" )
        UpperCAmelCase : Tuple = processor(images=A , return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def _lowercase( self ) -> List[Any]:
        UpperCAmelCase : Optional[int] = self.get_image_processor()
        UpperCAmelCase : int = self.get_tokenizer()
        UpperCAmelCase : Dict = self.get_qformer_tokenizer()

        UpperCAmelCase : Any = InstructBlipProcessor(
            tokenizer=A , image_processor=A , qformer_tokenizer=A )

        UpperCAmelCase : List[Any] = """lower newer"""

        UpperCAmelCase : str = processor(text=A )

        UpperCAmelCase : List[str] = tokenizer(A , return_token_type_ids=A )
        UpperCAmelCase : int = qformer_tokenizer(A , return_token_type_ids=A )

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )

    def _lowercase( self ) -> int:
        UpperCAmelCase : str = self.get_image_processor()
        UpperCAmelCase : Optional[Any] = self.get_tokenizer()
        UpperCAmelCase : str = self.get_qformer_tokenizer()

        UpperCAmelCase : List[str] = InstructBlipProcessor(
            tokenizer=A , image_processor=A , qformer_tokenizer=A )

        UpperCAmelCase : List[Any] = """lower newer"""
        UpperCAmelCase : List[str] = self.prepare_image_inputs()

        UpperCAmelCase : Union[str, Any] = processor(text=A , images=A )

        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )

        # test if it raises when no input is passed
        with pytest.raises(A ):
            processor()

    def _lowercase( self ) -> Dict:
        UpperCAmelCase : Optional[int] = self.get_image_processor()
        UpperCAmelCase : List[Any] = self.get_tokenizer()
        UpperCAmelCase : List[str] = self.get_qformer_tokenizer()

        UpperCAmelCase : Optional[Any] = InstructBlipProcessor(
            tokenizer=A , image_processor=A , qformer_tokenizer=A )

        UpperCAmelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        UpperCAmelCase : Tuple = processor.batch_decode(A )
        UpperCAmelCase : Tuple = tokenizer.batch_decode(A )

        self.assertListEqual(A , A )

    def _lowercase( self ) -> int:
        UpperCAmelCase : Union[str, Any] = self.get_image_processor()
        UpperCAmelCase : Optional[int] = self.get_tokenizer()
        UpperCAmelCase : List[Any] = self.get_qformer_tokenizer()

        UpperCAmelCase : Union[str, Any] = InstructBlipProcessor(
            tokenizer=A , image_processor=A , qformer_tokenizer=A )

        UpperCAmelCase : Dict = """lower newer"""
        UpperCAmelCase : Any = self.prepare_image_inputs()

        UpperCAmelCase : Dict = processor(text=A , images=A )

        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask

if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST

class UpperCamelCase_ :
    def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any:
        UpperCAmelCase : Optional[Any] = parent
        UpperCAmelCase : str = batch_size
        UpperCAmelCase : Union[str, Any] = seq_length
        UpperCAmelCase : Optional[Any] = is_training
        UpperCAmelCase : int = use_input_mask
        UpperCAmelCase : List[Any] = use_labels
        UpperCAmelCase : Dict = vocab_size
        UpperCAmelCase : str = hidden_size
        UpperCAmelCase : List[Any] = projection_dim
        UpperCAmelCase : Tuple = num_hidden_layers
        UpperCAmelCase : Dict = num_attention_heads
        UpperCAmelCase : Optional[Any] = intermediate_size
        UpperCAmelCase : Any = dropout
        UpperCAmelCase : List[Any] = attention_dropout
        UpperCAmelCase : Optional[Any] = max_position_embeddings
        UpperCAmelCase : Tuple = initializer_range
        UpperCAmelCase : Optional[Any] = scope
        UpperCAmelCase : Union[str, Any] = bos_token_id

    def _lowercase( self ) -> Tuple:
        UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )

        if input_mask is not None:
            UpperCAmelCase : Tuple = input_mask.numpy()
            UpperCAmelCase , UpperCAmelCase : int = input_mask.shape
            UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(A ):
                UpperCAmelCase : Tuple = 1
                UpperCAmelCase : Optional[Any] = 0

        UpperCAmelCase : int = self.get_config()

        return config, input_ids, tf.convert_to_tensor(A )

    def _lowercase( self ) -> int:
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def _lowercase( self , A , A , A ) -> Union[str, Any]:
        UpperCAmelCase : int = TFBlipTextModel(config=A )
        UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A )
        UpperCAmelCase : int = model(A , training=A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _lowercase( self ) -> Optional[int]:
        UpperCAmelCase : Dict = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs
        UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict

@require_tf
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
    lowercase = (TFBlipTextModel,) if is_tf_available() else ()
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowercase( self ) -> int:
        UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
        UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )

    def _lowercase( self ) -> Tuple:
        self.config_tester.run_common_tests()

    def _lowercase( self ) -> List[Any]:
        UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def _lowercase( self ) -> List[str]:
        pass

    def _lowercase( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="""Blip does not use inputs_embeds""" )
    def _lowercase( self ) -> Union[str, Any]:
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def _lowercase( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def _lowercase( self ) -> Dict:
        pass

    @slow
    def _lowercase( self ) -> Dict:
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A )
            self.assertIsNotNone(A )

    def _lowercase( self , A=True ) -> str:
        super().test_pt_tf_model_equivalence(allow_missing_keys=A )
```

`style_context_codestyle`: 265

`label`: 1
Row 5

`code`:

```python
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

a : Any = {
    """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
    """tokenization_roberta""": ["""RobertaTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Dict = ["""RobertaTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Optional[Any] = [
        """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaForCausalLM""",
        """RobertaForMaskedLM""",
        """RobertaForMultipleChoice""",
        """RobertaForQuestionAnswering""",
        """RobertaForSequenceClassification""",
        """RobertaForTokenClassification""",
        """RobertaModel""",
        """RobertaPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Dict = [
        """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaForCausalLM""",
        """TFRobertaForMaskedLM""",
        """TFRobertaForMultipleChoice""",
        """TFRobertaForQuestionAnswering""",
        """TFRobertaForSequenceClassification""",
        """TFRobertaForTokenClassification""",
        """TFRobertaMainLayer""",
        """TFRobertaModel""",
        """TFRobertaPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : List[str] = [
        """FlaxRobertaForCausalLM""",
        """FlaxRobertaForMaskedLM""",
        """FlaxRobertaForMultipleChoice""",
        """FlaxRobertaForQuestionAnswering""",
        """FlaxRobertaForSequenceClassification""",
        """FlaxRobertaForTokenClassification""",
        """FlaxRobertaModel""",
        """FlaxRobertaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    a : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available

if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification

a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

a : int = """main"""
# Default branch name

a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)

a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.

a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes

a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""

@contextlib.contextmanager
def __lowerCamelCase ( ) -> List[str]:
    print("""Welcome!""" )
    yield
    print("""Bye!""" )

@contextlib.contextmanager
def __lowerCamelCase ( ) -> Optional[int]:
    print("""Bonjour!""" )
    yield
    print("""Au revoir!""" )

class UpperCamelCase_ ( unittest.TestCase ):
    def _lowercase( self ) -> List[Any]:
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""" ) is not None

class UpperCamelCase_ ( unittest.TestCase ):
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , A ) -> Tuple:
        with ContextManagers([] ):
            print("""Transformers are awesome!""" )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )

    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , A ) -> Dict:
        with ContextManagers([context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )

    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def _lowercase( self , A ) -> Union[str, Any]:
        with ContextManagers([context_fr(), context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )

    @require_torch
    def _lowercase( self ) -> Optional[int]:
        self.assertEqual(find_labels(A ) , ["""labels"""] )
        self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )

        class UpperCamelCase_ ( __magic_name__ ):
            pass

        self.assertEqual(find_labels(A ) , ["""labels"""] )

    @require_tf
    def _lowercase( self ) -> int:
        self.assertEqual(find_labels(A ) , ["""labels"""] )
        self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )

        class UpperCamelCase_ ( __magic_name__ ):
            pass

        self.assertEqual(find_labels(A ) , ["""labels"""] )

    @require_flax
    def _lowercase( self ) -> Any:
        # Flax models don't have labels
        self.assertEqual(find_labels(A ) , [] )
        self.assertEqual(find_labels(A ) , [] )
        self.assertEqual(find_labels(A ) , [] )

        class UpperCamelCase_ ( __magic_name__ ):
            pass

        self.assertEqual(find_labels(A ) , [] )
```

`style_context_codestyle`: 265

`label`: 1
Row 6

`code`:

```python
'''simple docstring'''

import math
import random

def __lowerCamelCase ( _lowercase , _lowercase = False ) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))

# Initial Value
a : Union[str, Any] = 0.0_2

def __lowerCamelCase ( _lowercase , _lowercase ) -> float:
    UpperCAmelCase : Any = float(2 * (random.randint(1 , 1_0_0 )) - 1 )

    for _ in range(_lowercase ):
        # Forward propagation
        UpperCAmelCase : str = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        UpperCAmelCase : List[str] = (expected / 1_0_0) - layer_a
        # Error delta
        UpperCAmelCase : Dict = layer_1_error * sigmoid_function(_lowercase , _lowercase )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_a * 1_0_0

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    a : List[str] = int(input("""Expected value: """))
    a : List[str] = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
```

`code_codestyle`: 265

`style_context`:

```python
'''simple docstring'''

from itertools import count

def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
    UpperCAmelCase : Any = [1] * min_block_length

    for n in count(_lowercase ):
        fill_count_functions.append(1 )

        for block_length in range(_lowercase , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_0_0_0_0_0_0:
            break

    return n

if __name__ == "__main__":
    print(F'''{solution() = }''')
```

`style_context_codestyle`: 265

`label`: 1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionInstructPixaPixPipeline lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowercase( self ) -> Any: torch.manual_seed(0 ) UpperCAmelCase : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) UpperCAmelCase : Tuple = PNDMScheduler(skip_prk_steps=A ) torch.manual_seed(0 ) UpperCAmelCase : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase : Tuple = CLIPTextModel(A ) UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : Any = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : str = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : List[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> str: UpperCAmelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : str = self.get_dummy_components() UpperCAmelCase : Any = 
StableDiffusionInstructPixaPixPipeline(**A ) UpperCAmelCase : str = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : List[Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = sd_pipe(**A ).images UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : str = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase( self ) -> Tuple: UpperCAmelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline(**A ) UpperCAmelCase : Any = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Union[str, Any] = """french fries""" UpperCAmelCase : Optional[Any] = sd_pipe(**A , negative_prompt=A ) UpperCAmelCase : int = output.images UpperCAmelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase : str = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline(**A ) UpperCAmelCase : Optional[int] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Any = [inputs["""prompt"""]] * 2 UpperCAmelCase : str = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_5_5.0 UpperCAmelCase : Any = torch.from_numpy(A ).unsqueeze(0 ).to(A ) UpperCAmelCase : int = image / 2 + 0.5 UpperCAmelCase : int = image.permute(0 , 3 , 1 , 2 ) UpperCAmelCase : Tuple = image.repeat(2 , 1 , 1 , 1 ) UpperCAmelCase : Optional[int] = sd_pipe(**A ).images UpperCAmelCase : Tuple = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) UpperCAmelCase : Optional[int] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : Tuple = self.get_dummy_components() UpperCAmelCase : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" ) UpperCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**A ) UpperCAmelCase : Dict = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = self.get_dummy_inputs(A ) UpperCAmelCase : str = sd_pipe(**A ).images UpperCAmelCase : Any = image[0, -3:, -3:, -1] UpperCAmelCase : Optional[int] = [round(A , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(A ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) UpperCAmelCase : Optional[int] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase( self ) -> Optional[Any]: 
super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.get_dummy_components() UpperCAmelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**A ) UpperCAmelCase : Tuple = VaeImageProcessor(do_resize=A , do_normalize=A ) UpperCAmelCase : List[Any] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : str = pipe(**self.get_dummy_inputs_by_type(A , input_image_type="""pt""" ) )[0] UpperCAmelCase : Any = components["""vae"""] UpperCAmelCase : Dict = self.get_dummy_inputs_by_type(A , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): UpperCAmelCase : Tuple = vae.encode(inputs[image_param] ).latent_dist.mode() UpperCAmelCase : Dict = pipe(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(A , 1e-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase( self , A=0 ) -> Union[str, Any]: UpperCAmelCase : Any = torch.manual_seed(A ) UpperCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) UpperCAmelCase : str = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> Tuple: UpperCAmelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() UpperCAmelCase : str = self.get_inputs() UpperCAmelCase : List[Any] = pipe(**A ).images UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCAmelCase : Optional[int] = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=A ) UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() UpperCAmelCase : Dict = self.get_inputs() UpperCAmelCase : List[Any] = pipe(**A ).images UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCAmelCase : Optional[int] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=A ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() UpperCAmelCase : Optional[int] = self.get_inputs() UpperCAmelCase : Dict = pipe(**A ).images UpperCAmelCase : int = image[0, -3:, -3:, -1].flatten() 
assert image.shape == (1, 512, 512, 3) UpperCAmelCase : Any = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase( self ) -> List[str]: UpperCAmelCase : int = 0 def callback_fn(A , A , A ) -> None: UpperCAmelCase : str = True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCAmelCase : Optional[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) UpperCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1] UpperCAmelCase : Tuple = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCAmelCase : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) UpperCAmelCase : str = latents[0, -3:, -3:, -1] UpperCAmelCase : List[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCAmelCase : Tuple = False UpperCAmelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() UpperCAmelCase : Optional[int] = self.get_inputs() pipe(**A , callback=A , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _lowercase( self ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Union[str, Any] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase : Optional[int] = self.get_inputs() UpperCAmelCase : Optional[int] = pipe(**A ) UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def _lowercase( self ) -> Tuple: UpperCAmelCase : Tuple = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 UpperCAmelCase : int = inputs["""image"""].resize((504, 504) ) UpperCAmelCase : Union[str, Any] = """timbrooks/instruct-pix2pix""" UpperCAmelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( A , safety_checker=A , ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() UpperCAmelCase : List[str] = pipe(**A ) UpperCAmelCase : Optional[int] = output.images[0] UpperCAmelCase : List[str] = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) UpperCAmelCase : Optional[int] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
265
'''simple docstring''' from __future__ import annotations import math class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : Optional[int] = size # approximate the overall size of segment tree with given value UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )] UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def _lowercase( self , A ) -> int: return idx * 2 def _lowercase( self , A ) -> int: return idx * 2 + 1 def _lowercase( self , A , A , A , A ) -> None: if left_element == right_element: UpperCAmelCase : str = a[left_element - 1] else: UpperCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(A ) , A , A , A ) self.build(self.right(A ) , mid + 1 , A , A ) UpperCAmelCase : str = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) def _lowercase( self , A , A , A , A , A , A ) -> bool: if self.flag[idx] is True: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : int = False if left_element != right_element: UpperCAmelCase : List[str] = self.lazy[idx] UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase : Optional[Any] = val if left_element != right_element: UpperCAmelCase : Tuple = val UpperCAmelCase : int = val UpperCAmelCase : Any = True UpperCAmelCase : str = True return True UpperCAmelCase : str = (left_element + right_element) // 2 self.update(self.left(A ) , A , A , A , A , A ) self.update(self.right(A ) , mid + 1 , A , A , A , A ) UpperCAmelCase : List[str] = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) return True def _lowercase( self , A , A , A , A , A ) -> int | float: if self.flag[idx] is True: UpperCAmelCase : Any = self.lazy[idx] UpperCAmelCase : Any = False if left_element != right_element: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : Tuple = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase : Dict = (left_element + right_element) // 2 UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A ) UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A ) return max(A , A ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a : Optional[Any] = 1_5 a : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
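# The same tuple-counting trick generalizes to other gates (illustration only,
# not part of the original file): AND is "no input is 0".
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


assert and_gate(1, 1) == 1 and and_gate(0, 1) == 0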
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # fundamental transformation applied to every channel value
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
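# An equivalent sketch with NumPy (illustration only; `numpy` is an assumption and
# is not used by the original file). `img.point(brightness)` evaluates the function
# once per possible 8-bit channel value to build a lookup table, and
# 128 + level + (c - 128) simplifies to c + level, i.e. a uniform shift.
import numpy as np


def change_brightness_np(pixels: np.ndarray, level: float) -> np.ndarray:
    """Shift every channel value by `level`, clamped to the valid 8-bit range."""
    return np.clip(pixels.astype(np.int16) + level, 0, 255).astype(np.uint8)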
'''simple docstring''' import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = RoFormerTokenizer lowercase = RoFormerTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> Tuple: super().setUp() def _lowercase( self , **A ) -> Optional[int]: return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **A ) def _lowercase( self , **A ) -> str: return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = """永和服装饰品有限公司,今天天气非常好""" UpperCAmelCase : Optional[int] = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好""" return input_text, output_text def _lowercase( self ) -> Tuple: UpperCAmelCase : Dict = self.get_tokenizer() UpperCAmelCase , UpperCAmelCase : Tuple = self.get_chinese_input_output_texts() UpperCAmelCase : Any = tokenizer.tokenize(A ) self.assertListEqual(A , output_text.split() ) UpperCAmelCase : Tuple = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase , UpperCAmelCase : int = self.get_chinese_input_output_texts() UpperCAmelCase : Any = tokenizer.tokenize(A ) self.assertListEqual(A , output_text.split() ) UpperCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token] UpperCAmelCase : Union[str, Any] = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Dict: pass def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> List[Any]: pass
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class UpperCamelCase_ ( __magic_name__ ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowercase( self ) -> Dict: with self.assertRaises(A ): UpperCAmelCase : Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def _lowercase( self ) -> Optional[int]: with self.assertRaises(A ): UpperCAmelCase : Dict = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCAmelCase : int = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _lowercase( self ) -> str: UpperCAmelCase : Any = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def _lowercase( self ) -> List[str]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCAmelCase : int = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def _lowercase( self ) -> str: UpperCAmelCase : Dict = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def _lowercase( self ) -> List[Any]: import PIL.Image UpperCAmelCase : List[str] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=A ) as mock_cast_to_python_objects: UpperCAmelCase : int = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , A ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = pa.BufferReader(_lowercase ) if isinstance(_lowercase , pa.Buffer ) else pa.memory_map(_lowercase ) UpperCAmelCase : int = pa.ipc.open_stream(_lowercase ) UpperCAmelCase : pa.Table = 
f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = pa.BufferOutputStream() UpperCAmelCase : int = pa.schema(_lowercase ) if fields else None with ArrowWriter(stream=_lowercase , schema=_lowercase , writer_batch_size=_lowercase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCAmelCase , UpperCAmelCase : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCAmelCase : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __lowerCamelCase ( ) -> List[Any]: UpperCAmelCase : Optional[Any] = pa.BufferOutputStream() UpperCAmelCase : List[str] = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=_lowercase , features=_lowercase ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCAmelCase : List[Any] = pa.BufferReader(output.getvalue() ) UpperCAmelCase : Union[str, Any] = pa.ipc.open_stream(_lowercase ) UpperCAmelCase : pa.Table = f.read_all() UpperCAmelCase : int = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(_lowercase ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) def __lowerCamelCase ( _lowercase ) -> List[Any]: UpperCAmelCase : int = pa.BufferOutputStream() with ArrowWriter( stream=_lowercase , writer_batch_size=_lowercase , hash_salt="""split_name""" , check_duplicates=_lowercase , ) as writer: with pytest.raises(_lowercase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCAmelCase , UpperCAmelCase : str = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : Any = pa.BufferOutputStream() with ArrowWriter( stream=_lowercase , writer_batch_size=_lowercase , hash_salt="""split_name""" , check_duplicates=_lowercase , ) as writer: with pytest.raises(_lowercase ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 ) UpperCAmelCase , UpperCAmelCase : Dict = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] ) def __lowerCamelCase ( _lowercase ) -> Dict: UpperCAmelCase : Optional[int] = pa.BufferOutputStream() with ArrowWriter( stream=_lowercase , writer_batch_size=_lowercase , hash_salt="""split_name""" , check_duplicates=_lowercase , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 
2} , key=2 ) UpperCAmelCase , UpperCAmelCase : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> str: UpperCAmelCase : List[str] = pa.BufferOutputStream() UpperCAmelCase : Dict = pa.schema(_lowercase ) if fields else None with ArrowWriter(stream=_lowercase , schema=_lowercase , writer_batch_size=_lowercase ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCAmelCase , UpperCAmelCase : List[str] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCAmelCase : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : int = pa.BufferOutputStream() UpperCAmelCase : Any = pa.schema(_lowercase ) if fields else None with ArrowWriter(stream=_lowercase , schema=_lowercase , writer_batch_size=_lowercase ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCAmelCase , UpperCAmelCase : Tuple = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCAmelCase : Union[str, Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]: UpperCAmelCase : Dict = pa.BufferOutputStream() UpperCAmelCase : Optional[int] = pa.schema(_lowercase ) if fields else None with ArrowWriter(stream=_lowercase , schema=_lowercase , writer_batch_size=_lowercase ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCAmelCase , UpperCAmelCase : Any = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCAmelCase : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(_lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __lowerCamelCase ( ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase : Union[str, Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCAmelCase : Any = os.path.join(_lowercase , """test.arrow""" ) with 
ArrowWriter(path=_lowercase , schema=pa.schema(_lowercase ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCAmelCase , UpperCAmelCase : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(_lowercase , metadata=writer._schema.metadata ) _check_output(_lowercase , 1 ) def __lowerCamelCase ( _lowercase ) -> Optional[Any]: if pa.types.is_list(_lowercase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple: if isinstance(lst[0] , _lowercase ): change_first_primitive_element_in_list(lst[0] , _lowercase ) else: UpperCAmelCase : Dict = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = pa.array(TypedSequence(_lowercase , optimized_int_type=_lowercase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict: # in range UpperCAmelCase : Optional[int] = pa.array(OptimizedTypedSequence(_lowercase , col=_lowercase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCAmelCase : int = copy.deepcopy(_lowercase ) UpperCAmelCase : Tuple = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(_lowercase , _lowercase ) UpperCAmelCase : Any = pa.array(OptimizedTypedSequence(_lowercase , col=_lowercase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict: UpperCAmelCase : List[str] = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=_lowercase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __lowerCamelCase ( _lowercase ) -> int: UpperCAmelCase : Optional[Any] = """mock://dataset-train.arrow""" with ArrowWriter(path=_lowercase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(_lowercase ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCAmelCase , UpperCAmelCase : str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(_lowercase ) def __lowerCamelCase ( ) -> Optional[int]: UpperCAmelCase : str = pa.BufferOutputStream() with ParquetWriter(stream=_lowercase ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCAmelCase , UpperCAmelCase : List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCAmelCase : str = pa.BufferReader(output.getvalue() ) UpperCAmelCase : pa.Table = pq.read_table(_lowercase ) assert 
pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict: import PIL.Image UpperCAmelCase : Union[str, Any] = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_lowercase , format="""png""" ) UpperCAmelCase : Optional[int] = pa.BufferOutputStream() with ParquetWriter( stream=_lowercase , features=Features({"""image""": Image()} ) , embed_local_files=_lowercase ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCAmelCase : Any = pa.BufferReader(output.getvalue() ) UpperCAmelCase : pa.Table = pq.read_table(_lowercase ) UpperCAmelCase : Union[str, Any] = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , _lowercase ) with open(_lowercase , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __lowerCamelCase ( ) -> List[Any]: UpperCAmelCase : List[str] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_lowercase )] ) UpperCAmelCase : Union[str, Any] = pa.BufferOutputStream() with ArrowWriter(stream=_lowercase ) as writer: writer._build_writer(inferred_schema=_lowercase ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer computation of a ** b for a non-negative integer b."""
    if b == 0:
        return 1
    # compute the half power once and reuse it, so the recursion is O(log b)
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Handle negative exponents by inverting the corresponding positive power."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
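# Worked example (illustration only): halving the exponent at each level means
# O(log b) multiplications once the half power is reused.
#   power(2, 10) -> half = actual_power(2, 5); 2**10 = half * half
#   power(2, 5)  -> half = actual_power(2, 2); 2**5  = 2 * half * half
assert power(2, 10) == 1024
assert power(3, 0) == 1
assert power(2, -3) == 0.125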
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> int: UpperCAmelCase : Optional[int] = parent UpperCAmelCase : Any = 13 UpperCAmelCase : str = 7 UpperCAmelCase : List[str] = True UpperCAmelCase : Dict = True UpperCAmelCase : Tuple = True UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Tuple = 99 UpperCAmelCase : int = 384 UpperCAmelCase : Tuple = 2 UpperCAmelCase : Tuple = 4 UpperCAmelCase : Any = 37 UpperCAmelCase : Tuple = """gelu""" UpperCAmelCase : Dict = 0.1 UpperCAmelCase : List[str] = 0.1 UpperCAmelCase : str = 512 UpperCAmelCase : Union[str, Any] = 16 UpperCAmelCase : Dict = 2 UpperCAmelCase : Any = 0.0_2 UpperCAmelCase : str = 3 UpperCAmelCase : Union[str, Any] = 4 UpperCAmelCase : Tuple = 128 UpperCAmelCase : Optional[int] = 2 UpperCAmelCase : Optional[int] = 9 UpperCAmelCase : List[str] = 1 UpperCAmelCase : Optional[int] = None def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Dict = None if self.use_token_type_ids: UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : int = None UpperCAmelCase : List[Any] = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Optional[Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase( self , A , A , A , A , A , A , A ) -> Optional[Any]: UpperCAmelCase : List[Any] = TFConvBertModel(config=A ) UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCAmelCase : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase : str = model(A ) UpperCAmelCase : Tuple = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A , A , A , A ) -> Any: UpperCAmelCase : Any = TFConvBertForMaskedLM(config=A ) UpperCAmelCase : List[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } UpperCAmelCase : List[Any] = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple: UpperCAmelCase : Optional[Any] = self.num_labels UpperCAmelCase : Optional[int] = TFConvBertForSequenceClassification(config=A ) UpperCAmelCase : str = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } UpperCAmelCase : Union[str, Any] = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A , A , A , A ) -> Optional[Any]: UpperCAmelCase : Any = self.num_choices UpperCAmelCase : Optional[Any] = TFConvBertForMultipleChoice(config=A ) UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : Dict = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : Any = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase : Union[str, Any] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCAmelCase : Tuple = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase( self , A , A , A , A , A , A , A ) -> Any: UpperCAmelCase : int = self.num_labels UpperCAmelCase : Optional[int] = TFConvBertForTokenClassification(config=A ) UpperCAmelCase : Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } UpperCAmelCase : Dict = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A , A , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = TFConvBertForQuestionAnswering(config=A ) UpperCAmelCase : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : int = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 
'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = TFConvBertModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Any: self.config_tester.run_common_tests() def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A ) def _lowercase( self ) -> Dict: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> int: UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @slow def _lowercase( self ) -> List[str]: UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = True UpperCAmelCase : int = True if hasattr(A , """use_cache""" ): UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) UpperCAmelCase : Tuple = getattr(self.model_tester , """key_length""" , A ) for model_class in self.all_model_classes: UpperCAmelCase : Union[str, Any] = self._prepare_for_class(A , A ) UpperCAmelCase : Tuple = model_class(A ) UpperCAmelCase : str = len(model(A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A , saved_model=A ) UpperCAmelCase : List[Any] = os.path.join(A , """saved_model""" , """1""" ) UpperCAmelCase : Dict = tf.keras.models.load_model(A ) UpperCAmelCase : Optional[int] = model(A ) if self.is_encoder_decoder: UpperCAmelCase : Dict = outputs["""encoder_hidden_states"""] UpperCAmelCase : Tuple = outputs["""encoder_attentions"""] else: UpperCAmelCase : Tuple = outputs["""hidden_states"""] UpperCAmelCase : int = outputs["""attentions"""] self.assertEqual(len(A ) , A ) UpperCAmelCase : str = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A ) , A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def _lowercase( self ) -> Any: UpperCAmelCase : int = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Tuple = True UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """decoder_seq_length""" , 
self.model_tester.seq_length ) UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) UpperCAmelCase : str = getattr(self.model_tester , """key_length""" , A ) UpperCAmelCase : Tuple = getattr(self.model_tester , """key_length""" , A ) def check_decoder_attentions_output(A ): UpperCAmelCase : Optional[Any] = len(A ) self.assertEqual(out_len % 2 , 0 ) UpperCAmelCase : Tuple = outputs.decoder_attentions self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A ): UpperCAmelCase : Union[str, Any] = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase : int = True UpperCAmelCase : Tuple = False UpperCAmelCase : Dict = model_class(A ) UpperCAmelCase : Dict = model(self._prepare_for_class(A , A ) ) UpperCAmelCase : Union[str, Any] = len(A ) self.assertEqual(config.output_hidden_states , A ) check_encoder_attentions_output(A ) if self.is_encoder_decoder: UpperCAmelCase : List[Any] = model_class(A ) UpperCAmelCase : Dict = model(self._prepare_for_class(A , A ) ) self.assertEqual(config.output_hidden_states , A ) check_decoder_attentions_output(A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase : List[str] = True UpperCAmelCase : int = model_class(A ) UpperCAmelCase : List[Any] = model(self._prepare_for_class(A , A ) ) self.assertEqual(config.output_hidden_states , A ) check_encoder_attentions_output(A ) # Check attention is always last and order is fine UpperCAmelCase : Dict = True UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : List[str] = model_class(A ) UpperCAmelCase : str = model(self._prepare_for_class(A , A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A ) ) self.assertEqual(model.config.output_hidden_states , A ) check_encoder_attentions_output(A ) @require_tf class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) UpperCAmelCase : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase : Tuple = model(A )[0] UpperCAmelCase : int = [1, 6, 768] self.assertEqual(output.shape , A ) UpperCAmelCase : Union[str, Any] = tf.constant( [ [ [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2], [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4], [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A , atol=1e-4 )
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
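# Usage sketch (illustration only): because the package is wrapped in _LazyModule,
# importing a light symbol such as ReformerConfig does not pull in the
# torch-dependent modeling code; each submodule is materialized on first
# attribute access.
from transformers import ReformerConfig

config = ReformerConfig()  # builds a default config without importing modeling_reformer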
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
265
1
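A condensed usage sketch of the three-stage DiffEdit flow the slow tests above exercise: compute a mask from two prompts, invert the image into latents, then inpaint inside the mask. It mirrors the tests' checkpoint and image and assumes a GPU plus downloaded weights:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
generator = torch.manual_seed(0)

# 1) where do the two prompts disagree?  2) invert image -> latents  3) inpaint inside the mask
mask = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears", generator=generator
)
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7, generator=generator).latents
image = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7, generator=generator
).images[0]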
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
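These assertions only do anything interesting under a multi-process launch. A minimal standalone sketch of one collective, to be run with something like `accelerate launch --num_processes 2 gather_demo.py`:

# gather_demo.py -- each rank contributes one value; after gather(), every rank sees all of them.
import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
local = torch.tensor([float(state.process_index)], device=state.device)
print(f"rank {state.process_index}: {gather(local).tolist()}")  # e.g. [0.0, 1.0] on both ranks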
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
265
1
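The sharder above sizes each shard as `numel() * dtype_byte_size(dtype)`. A small sanity check of that arithmetic, using the same helper the script imports:

import torch
from transformers.modeling_utils import dtype_byte_size

weight = torch.zeros(1024, 1024, dtype=torch.float32)
print(dtype_byte_size(weight.dtype))                   # 4 bytes per float32 element
print(weight.numel() * dtype_byte_size(weight.dtype))  # 4194304 bytes (~4 MiB) for this tensor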
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class UpperCamelCase_(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
265
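A usage sketch for an unconditional audio pipeline with this `__call__` signature. The checkpoint id is an assumed example of a published unet+scheduler pair; any DanceDiffusion-style model exposing the same arguments should work:

import torch
from scipy.io.wavfile import write
from diffusers import DiffusionPipeline

# "harmonai/maestro-150k" is an assumed example checkpoint id, substitute your own.
pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16).to("cuda")
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
# audios[0] has shape (channels, samples); transpose for scipy's (samples, channels) layout.
write("sample.wav", rate=pipe.unet.config.sample_rate, data=output.audios[0].T)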
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
265
1
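The conversion above is mostly key surgery on a state dict. A toy illustration of the same pop-and-reinsert rename pattern; the key names below are invented for the example, not SwiftFormer's real ones:

import torch

# toy state dict with invented keys, standing in for the original checkpoint's names
state_dict = {"network.0.pwconv.weight": torch.randn(4), "head.weight": torch.randn(4)}


def rename_key(dct, old, new):
    dct[new] = dct.pop(old)  # same pop-and-reinsert move the converter's rename_key uses


rename_key(state_dict, "network.0.pwconv.weight", "swiftformer.encoder.network.0.point_wise_conv.weight")
print(sorted(state_dict))  # head.weight kept, the other key rewritten in place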
'''simple docstring'''
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
265
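Programmatic use of the converter, assuming a real ALBERT TF checkpoint on disk; the import path matches where recent transformers releases keep this script, and the file paths are placeholders:

# Paths below are placeholders for a real ALBERT TF checkpoint and its config.
from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",
    albert_config_file="albert_base/albert_config.json",
    pytorch_dump_path="albert_base/pytorch_model.bin",
)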
'''simple docstring'''
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit_output(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
265
1
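Outside of tests, the same decorator wraps a real training loop and halves the batch size on genuine CUDA out-of-memory errors. A minimal sketch in the accelerate style, where `build_model` and `build_dataloader` are placeholders for your own setup code:

from accelerate import Accelerator
from accelerate.utils.memory import find_executable_batch_size


def train(starting_batch_size=128):
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner_training_loop(batch_size):
        # build_model()/build_dataloader() are placeholders for your own setup;
        # everything is re-created here so each retry at a smaller batch starts clean.
        accelerator.free_memory()
        model = build_model()
        loader = build_dataloader(batch_size)
        model, loader = accelerator.prepare(model, loader)
        for batch in loader:
            ...  # forward/backward/step as usual

    inner_training_loop()  # note: called with no arguments; the decorator supplies batch_size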
'''simple docstring'''
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
265
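What the two metrics actually measure, on toy strings, using the same `load_metric` entry points as the script (newer codebases import these from the `evaluate` package instead):

from datasets import load_metric

wer = load_metric("wer")
cer = load_metric("cer")

refs = ["the cat sat on the mat"]
preds = ["the cat sit on the mat"]

print(wer.compute(references=refs, predictions=preds))  # 1 substituted word / 6 words ~ 0.167
print(cer.compute(references=refs, predictions=preds))  # 1 wrong character / 22 characters ~ 0.045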
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
1
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase_ ( __magic_name__ ): lowercase = ['vqvae'] def __init__( self , A , A , A , A , ) -> List[str]: super().__init__() self.register_modules(unet=A , scheduler=A , mel=A , vqvae=A ) def _lowercase( self ) -> int: return 50 if isinstance(self.scheduler , A ) else 1000 @torch.no_grad() def __call__( self , A = 1 , A = None , A = None , A = 0 , A = 0 , A = None , A = None , A = 0 , A = 0 , A = None , A = 0 , A = None , A = None , A=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: UpperCAmelCase : Dict = steps or self.get_default_steps() self.scheduler.set_timesteps(A ) UpperCAmelCase : Optional[int] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase : Union[str, Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase : str = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=A , device=self.device , ) UpperCAmelCase : Dict = noise UpperCAmelCase : Any = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(A , A ) UpperCAmelCase : int = self.mel.audio_slice_to_image(A ) UpperCAmelCase : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase : Union[str, Any] = (input_image / 255) * 2 - 1 UpperCAmelCase : Optional[int] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase : Optional[int] = self.vqvae.encode(torch.unsqueeze(A , 0 ) ).latent_dist.sample( generator=A )[0] UpperCAmelCase : Union[str, Any] = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase : Optional[Any] = self.scheduler.add_noise(A , A , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase : int = int(mask_start_secs * pixels_per_second ) UpperCAmelCase : List[Any] = int(mask_end_secs * pixels_per_second ) UpperCAmelCase : List[str] = self.scheduler.add_noise(A , A , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , A ): UpperCAmelCase : Dict = self.unet(A , A , A )["""sample"""] else: UpperCAmelCase : str = self.unet(A , A )["""sample"""] if isinstance(self.scheduler , A ): UpperCAmelCase : Union[str, Any] = self.scheduler.step( model_output=A , timestep=A , sample=A , eta=A , generator=A , )["""prev_sample"""] else: UpperCAmelCase : Any = self.scheduler.step( model_output=A , timestep=A , sample=A , generator=A , )["""prev_sample"""] if mask is not None: if mask_start > 0: UpperCAmelCase : List[str] = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in 
training to ensure unit variance UpperCAmelCase : List[str] = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase : Union[str, Any] = self.vqvae.decode(A )["""sample"""] UpperCAmelCase : Any = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase : List[Any] = (images * 255).round().astype("""uint8""" ) UpperCAmelCase : Union[str, Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(A , mode="""RGB""" ).convert("""L""" ) for _ in images) ) UpperCAmelCase : List[str] = [self.mel.image_to_audio(A ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) , **ImagePipelineOutput(A ) ) @torch.no_grad() def _lowercase( self , A , A = 50 ) -> np.ndarray: assert isinstance(self.scheduler , A ) self.scheduler.set_timesteps(A ) UpperCAmelCase : str = np.array( [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase : Tuple = (sample / 255) * 2 - 1 UpperCAmelCase : Any = torch.Tensor(A ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase : int = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase : Any = self.scheduler.alphas_cumprod[t] UpperCAmelCase : int = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase : Any = 1 - alpha_prod_t UpperCAmelCase : Any = self.unet(A , A )["""sample"""] UpperCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase : Optional[int] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _lowercase( A , A , A ) -> torch.Tensor: UpperCAmelCase : Tuple = acos(torch.dot(torch.flatten(A ) , torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) ) return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
265
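The static `slerp` at the end of that pipeline interpolates two latents along a great circle instead of a straight line, which keeps the norm (and hence the noise level) roughly constant. A standalone sketch of the same formula:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    """Spherical interpolation between two tensors, with alpha in [0, 1]."""
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


a = torch.tensor([1.0, 0.0])
b = torch.tensor([0.0, 1.0])
print(slerp(a, b, 0.5))  # tensor([0.7071, 0.7071]) -- the midpoint stays on the unit circle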
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
265
1
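A quick illustration of the offset-mapping behaviour those tests pin down, using the same checkpoint (Hub access assumed; the offsets shown assume the tokenizer's default `trim_offsets=True`):

from transformers import LongformerTokenizerFast

tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
enc = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc["offset_mapping"])  # expected [(0, 5), (6, 11)]: trimming drops the second token's leading space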
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
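A minimal usage sketch of the pipeline these tests exercise; the checkpoint and the printed values are illustrative, not values asserted above:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# result["labels"] is sorted by descending score and the scores sum to ~1.0
print(result["labels"][0], result["scores"][0])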
265
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Dict: UpperCAmelCase : int = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Optional[int] = use_token_type_ids UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : Optional[Any] = vocab_size UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : Union[str, Any] = intermediate_size UpperCAmelCase : int = hidden_act UpperCAmelCase : Union[str, Any] = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : int = max_position_embeddings UpperCAmelCase : List[str] = type_vocab_size UpperCAmelCase : Union[str, Any] = type_sequence_label_size UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : List[str] = num_labels UpperCAmelCase : Optional[Any] = num_choices UpperCAmelCase : List[str] = scope UpperCAmelCase : Optional[Any] = self.vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_token_type_ids: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : Dict = None UpperCAmelCase : List[Any] = None UpperCAmelCase : Dict = None if self.use_labels: UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Tuple = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) UpperCAmelCase : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _lowercase( self , A , A , A , A , *A ) -> List[Any]: UpperCAmelCase : str = OpenAIGPTModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : Any = model(A , token_type_ids=A , head_mask=A ) UpperCAmelCase : Tuple = model(A , token_type_ids=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A , *A ) -> Dict: UpperCAmelCase : int = OpenAIGPTLMHeadModel(A ) model.to(A ) model.eval() UpperCAmelCase : Tuple = model(A , token_type_ids=A , labels=A ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A , *A ) -> Dict: UpperCAmelCase : Dict = OpenAIGPTDoubleHeadsModel(A ) model.to(A ) model.eval() UpperCAmelCase : Tuple = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A , *A ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = self.num_labels UpperCAmelCase : str = OpenAIGPTForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self ) -> Any: UpperCAmelCase : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : List[str] = config_and_inputs UpperCAmelCase : Tuple = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowercase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowercase = ( { 'feature-extraction': OpenAIGPTModel, 'text-classification': OpenAIGPTForSequenceClassification, 'text-generation': OpenAIGPTLMHeadModel, 'zero-shot': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _lowercase( self , A , A , A , A , A ) -> Optional[Any]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _lowercase( self , A , A , A=False ) -> int: UpperCAmelCase : List[str] = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": UpperCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , ) UpperCAmelCase : Union[str, Any] = inputs_dict["""labels"""] UpperCAmelCase : Optional[Any] = inputs_dict["""labels"""] UpperCAmelCase : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , ) UpperCAmelCase : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) return inputs_dict def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = OpenAIGPTModelTester(self ) UpperCAmelCase : Tuple = ConfigTester(self , config_class=A , n_embd=37 ) def _lowercase( self ) -> str: self.config_tester.run_common_tests() def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A ) @slow def _lowercase( self ) -> int: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : List[str] = OpenAIGPTModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> Dict: UpperCAmelCase : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(A ) UpperCAmelCase : Union[str, Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=A ) # the president is UpperCAmelCase : int = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the UpperCAmelCase : Optional[int] = model.generate(A , do_sample=A ) self.assertListEqual(output_ids[0].tolist() , A )
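A hedged sketch of the slow integration test above, run as a standalone snippet; the generated continuation depends on the `openai-gpt` weights and is not reproduced here:

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))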
265
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
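A sketch of a test consuming these fixtures; the argument names are hypothetical stand-ins for the obfuscated fixture names above, and the `load_dataset` call assumes the `datasets` version these fixtures target:

from datasets import load_dataset


def test_load_private_text_repo(hf_private_dataset_repo_txt_data, ci_hub_token):  # hypothetical fixture names
    # the session fixture yields a "<CI_HUB_USER>/<repo_name>" id on the CI hub endpoint
    ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=ci_hub_token)
    assert "train" in ds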
265
1
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip a : str = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def __lowerCamelCase ( _lowercase ) -> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: return max(metric_fn(_lowercase , _lowercase ) for gt in ground_truths ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int: UpperCAmelCase : List[Any] = [line.strip() for line in open(_lowercase , """r""" ).readlines()] UpperCAmelCase : Tuple = [] if args.gold_data_mode == "qa": UpperCAmelCase : Optional[int] = pd.read_csv(_lowercase , sep="""\t""" , header=_lowercase ) for answer_list in data[1]: UpperCAmelCase : Tuple = ast.literal_eval(_lowercase ) answers.append(_lowercase ) else: UpperCAmelCase : List[Any] = [line.strip() for line in open(_lowercase , """r""" ).readlines()] UpperCAmelCase : str = [[reference] for reference in references] UpperCAmelCase : str = 0 for prediction, ground_truths in zip(_lowercase , _lowercase ): total += 1 em += metric_max_over_ground_truths(_lowercase , _lowercase , _lowercase ) fa += metric_max_over_ground_truths(_lowercase , _lowercase , _lowercase ) UpperCAmelCase : Tuple = 100.0 * em / total UpperCAmelCase : Any = 100.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : Union[str, Any] = args.k UpperCAmelCase : Union[str, Any] = [line.strip() for line in open(_lowercase , """r""" ).readlines()] UpperCAmelCase : str = [line.strip() for line in open(_lowercase , """r""" ).readlines()] UpperCAmelCase : Dict = 0 for hypo, reference in zip(_lowercase , _lowercase ): UpperCAmelCase : Optional[Any] = set(hypo.split("""\t""" )[:k] ) UpperCAmelCase : Optional[int] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k UpperCAmelCase : Dict = 100.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int: def strip_title(_lowercase ): if title.startswith("""\"""" ): UpperCAmelCase : Union[str, Any] = title[1:] if title.endswith("""\"""" ): UpperCAmelCase : Optional[int] = title[:-1] return title UpperCAmelCase : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _lowercase , return_tensors="""pt""" , padding=_lowercase , truncation=_lowercase , )["""input_ids"""].to(args.device ) UpperCAmelCase : str = rag_model.rag.question_encoder(_lowercase ) UpperCAmelCase : Tuple = question_enc_outputs[0] UpperCAmelCase : Optional[int] = rag_model.retriever( _lowercase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) UpperCAmelCase : Union[str, Any] = 
rag_model.retriever.index.get_doc_dicts(result.doc_ids ) UpperCAmelCase : str = [] for docs in all_docs: UpperCAmelCase : str = [strip_title(_lowercase ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(_lowercase ) ) return provenance_strings def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: with torch.no_grad(): UpperCAmelCase : Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _lowercase , return_tensors="""pt""" , padding=_lowercase , truncation=_lowercase ) UpperCAmelCase : List[Any] = inputs_dict.input_ids.to(args.device ) UpperCAmelCase : Optional[Any] = inputs_dict.attention_mask.to(args.device ) UpperCAmelCase : Any = rag_model.generate( # rag_model overwrites generate _lowercase , attention_mask=_lowercase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowercase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) UpperCAmelCase : Optional[Any] = rag_model.retriever.generator_tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase ) if args.print_predictions: for q, a in zip(_lowercase , _lowercase ): logger.info("""Q: {} - A: {}""".format(_lowercase , _lowercase ) ) return answers def __lowerCamelCase ( ) -> List[Any]: UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_lowercase , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=_lowercase , choices=["""exact""", """compressed""", """legacy"""] , type=_lowercase , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=_lowercase , type=_lowercase , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=_lowercase , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_lowercase , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=_lowercase , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=_lowercase , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=_lowercase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same 
prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=_lowercase , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=_lowercase , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=_lowercase , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=5_0 , type=_lowercase , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Union[str, Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Optional[int] = {} if args.model_type is None: UpperCAmelCase : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): UpperCAmelCase : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration UpperCAmelCase : int = args.n_docs if args.index_name is not None: UpperCAmelCase : List[str] = args.index_name if args.index_path is not None: UpperCAmelCase : List[str] = args.index_path else: UpperCAmelCase : Tuple = BartForConditionalGeneration UpperCAmelCase : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , _lowercase ) UpperCAmelCase : int = get_scores if args.eval_mode == """e2e""" else get_precision_at_k UpperCAmelCase : int = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(_lowercase , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(_lowercase ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(_lowercase , **_lowercase ) UpperCAmelCase : Optional[int] = model_class.from_pretrained(_lowercase , retriever=_lowercase , **_lowercase ) model.retriever.init_retrieval() else: UpperCAmelCase : Union[str, Any] = model_class.from_pretrained(_lowercase , **_lowercase ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: UpperCAmelCase : Any = [] for line in tqdm(_lowercase ): questions.append(line.strip() ) if len(_lowercase ) == args.eval_batch_size: UpperCAmelCase : Any = evaluate_batch_fn(_lowercase , _lowercase , _lowercase ) preds_file.write("""\n""".join(_lowercase ) + """\n""" ) preds_file.flush() UpperCAmelCase : Any = [] if 
len(_lowercase ) > 0: UpperCAmelCase : Optional[Any] = evaluate_batch_fn(_lowercase , _lowercase , _lowercase ) preds_file.write("""\n""".join(_lowercase ) ) preds_file.flush() score_fn(_lowercase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": a : Optional[int] = get_args() main(args)
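The e2e scoring path takes, for each prediction, the maximum of a metric over all gold answers. A self-contained sketch of that idea with a trivial exact-match metric:

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())


def max_over_ground_truths(metric_fn, prediction, ground_truths) -> float:
    return max(metric_fn(prediction, gt) for gt in ground_truths)


preds = ["paris", "george washington"]
golds = [["Paris"], ["Washington", "George Washington"]]
em = 100.0 * sum(max_over_ground_truths(exact_match, p, g) for p, g in zip(preds, golds)) / len(preds)
print(f"EM: {em:.2f}")  # EM: 100.00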
265
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
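A hedged usage sketch of this pipeline; the CLIP checkpoint and image URL are illustrative placeholders:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",
)
# a list of {"score": ..., "label": ...} dicts sorted by descending score
print(preds[0]["label"])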
265
1
'''simple docstring'''

ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
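Quick sanity checks for the converter, using the names restored above:

assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
try:
    energy_conversion("joule", "mystery_unit", 1)  # unknown units raise
except ValueError as err:
    print(err)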
265
'''simple docstring'''

from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
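The same behaviour exercised directly, relying only on the mapping interface the parametrized test above already uses:

from data_structures.hashing.hash_map import HashMap

hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"
hm["key_a"] = "val_b"  # overwrite keeps a single entry
assert hm["key_a"] == "val_b"
assert len(hm) == 1
del hm["key_a"]
assert len(hm) == 0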
265
1
'''simple docstring'''

import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        parsed = parse_roman_numerals(original)
        shortened = generate_roman_numerals(parsed)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
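Quick round-trip checks for the two helpers (generation always emits the minimal subtractive form):

assert parse_roman_numerals("XIV") == 14
assert generate_roman_numerals(1_990) == "MCMXC"
assert parse_roman_numerals(generate_roman_numerals(49)) == 49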
265
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
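A minimal standalone sketch of the language-code handling these tests assert; the suffix ids shown match the EN_CODE value used above:

from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# MBart appends [eos, src_lang_code] as suffix tokens: [..., 2, 250004]
print(batch.input_ids[0][-2:].tolist())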
265
1
'''simple docstring'''

from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
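A sketch of instantiating the config with its defaults (class and attribute names as restored above):

from transformers import EfficientNetConfig

config = EfficientNetConfig()
print(config.image_size)         # 600
print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64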
265
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
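The tests above exercise the standard Hub round-trip for image processors. A hedged sketch of the same flow outside the test harness (repo id and token are placeholders):

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# Creates or updates the repo under your namespace; the token is a placeholder.
processor.push_to_hub("my-test-image-processor", use_auth_token="hf_xxx")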
265
1
'''simple docstring'''


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # iterate up to the length of the longer string
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
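A quick worked example of the interleaving (values checked by hand):

print(alternative_string_arrange("ABCD", "XY"))  # -> AXBYCD
print(alternative_string_arrange("AB", "XYZ"))   # -> AXBYZ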
265
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def __lowerCamelCase ( _lowercase ) -> Tuple: return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def __lowerCamelCase ( _lowercase ) -> List[Any]: UpperCAmelCase : Any = create_tensor(_lowercase ) UpperCAmelCase : Union[str, Any] = gather(_lowercase ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : Any = [state.process_index] UpperCAmelCase : Union[str, Any] = gather_object(_lowercase ) assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}''' def __lowerCamelCase ( _lowercase ) -> List[Any]: UpperCAmelCase : Optional[int] = create_tensor(_lowercase ) UpperCAmelCase : List[str] = broadcast(_lowercase ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def __lowerCamelCase ( _lowercase ) -> Tuple: # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device ) else: UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device ) UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def __lowerCamelCase ( _lowercase ) -> Dict: # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase : Optional[Any] = create_tensor(_lowercase ) UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" ) UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( _lowercase ) -> Optional[Any]: # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase : Tuple = create_tensor(_lowercase ) UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" ) UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( _lowercase ) -> Optional[int]: # For xla_spawn (TPUs) main() def __lowerCamelCase ( ) -> int: UpperCAmelCase : List[Any] = PartialState() state.print(F'''State: {state}''' ) state.print("""testing gather""" ) test_gather(_lowercase ) 
state.print("""testing gather_object""" ) test_gather_object(_lowercase ) state.print("""testing broadcast""" ) test_broadcast(_lowercase ) state.print("""testing pad_across_processes""" ) test_pad_across_processes(_lowercase ) state.print("""testing reduce_sum""" ) test_reduce_sum(_lowercase ) state.print("""testing reduce_mean""" ) test_reduce_mean(_lowercase ) if __name__ == "__main__": main()
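A self-contained sketch of what the gather check above does; it mirrors the tensor construction at the top of the file and is meant to be run under `accelerate launch --num_processes N script.py` (the script name is a placeholder; with a single process the collective is a no-op):

import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
# each rank contributes [rank*N + 1.0, ..., rank*N + N]
tensor = (torch.arange(state.num_processes) + 1.0 + state.num_processes * state.process_index).to(state.device)
gathered = gather(tensor)
# across N processes this concatenates to [1.0, 2.0, ..., N*N]
assert gathered.tolist() == list(range(1, state.num_processes**2 + 1))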
265
1
'''simple docstring'''


def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling produces a two-digit number (e.g. 6 x 2 = 12), add its
        # digits (12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single digit.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
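A quick sanity check of the Luhn helper against the canonical test number (11 digits, so it exercises luhn_validation directly rather than the full validator, which also enforces a 13-16 digit length):

assert luhn_validation("79927398713")      # the classic valid Luhn example
assert not luhn_validation("79927398712")  # any other check digit fails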
265
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
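A hedged usage sketch for the pipeline above. Its latents_reference trick reads like the diffusers "seed resize" community pipeline (same seed, different sizes, similar images), so the custom_pipeline name below is an assumption and the checkpoint id is a placeholder:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                # placeholder checkpoint
    custom_pipeline="seed_resize_stable_diffusion",  # assumed registered name
    torch_dtype=torch.float16,
).to("cuda")
image = pipe("a photo of an astronaut riding a horse", height=512, width=768).images[0]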
265
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Any: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Tuple: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Any: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Union[str, Any]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[str]: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Any: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> str: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> str: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Optional[Any]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Dict: 
requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> int: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> int: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Dict: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[str]: requires_backends(cls , ["""flax"""] ) class UpperCamelCase_ ( metaclass=__magic_name__ ): lowercase = ['flax'] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> Tuple: requires_backends(cls , ["""flax"""] ) @classmethod def _lowercase( cls , *A , **A ) -> List[Any]: requires_backends(cls , ["""flax"""] )
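Sketch of the mechanism these dummies implement: every class is a placeholder that fails fast, at use time, with an actionable "install flax" message rather than an ImportError at package import. In the unobfuscated source the class attribute is `_backends = ["flax"]`; the stand-in class name below is illustrative:

from diffusers.utils import DummyObject, requires_backends  # assumed package; transformers ships the same utilities

class FlaxOnlyPlaceholder(metaclass=DummyObject):  # illustrative stand-in
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

# FlaxOnlyPlaceholder() raises unless flax is installed.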
265
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
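A standalone sketch of the config the tester above assembles (keyword names follow its get_config method; values mirror the tester defaults):

from transformers import BlipTextConfig

config = BlipTextConfig(
    vocab_size=99,
    hidden_size=32,
    projection_dim=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
print(config.hidden_size)  # 32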
265
1
'''simple docstring'''
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
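A sanity check of the recurrence, re-derived as a standalone helper. This is the Project Euler 114/115 block-counting problem, and with a minimum block length of 3 a row of length 7 is known to admit exactly 17 fillings:

def fill_count(min_block_length: int, row_length: int) -> int:
    counts = [1] * min_block_length
    for n in range(min_block_length, row_length + 1):
        counts.append(1)
        for block_length in range(min_block_length, n + 1):
            # blocks that leave at least one empty cell after them
            for block_start in range(n - block_length):
                counts[n] += counts[n - block_start - block_length - 1]
            # plus the block placed flush against the right edge
            counts[n] += 1
    return counts[row_length]

assert fill_count(3, 7) == 17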
265
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
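Concretely, find_labels inspects the signature of the model's forward (or call) method for label-like arguments, which is what the assertions above pin down:

from transformers import BertForPreTraining
from transformers.utils import find_labels

print(find_labels(BertForPreTraining))  # ['labels', 'next_sentence_label']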
265
1
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate a : Any = trt.Logger(trt.Logger.WARNING) a : List[str] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) a : List[Any] = logging.getLogger(__name__) a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=3_8_4, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=1_2_8, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=2_0, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=3_0, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=4_2, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) a : int = parser.parse_args() if args.tokenizer_name: a : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) a : str = args.per_device_eval_batch_size a : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties a : str = True a : Optional[int] = """temp_engine/bert-fp32.engine""" if args.fpaa: a : Any = """temp_engine/bert-fp16.engine""" if args.inta: a : int = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") a : List[str] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network a : Dict = [network.get_input(i) for i in range(network.num_inputs)] a : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: a : List[Any] = 1 << 5_0 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) a : Any = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) a : Optional[int] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : List[Any] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) UpperCAmelCase : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) UpperCAmelCase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowercase ) 
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowercase ) # start time UpperCAmelCase : str = time.time() # Run inference context.execute_async( bindings=[int(_lowercase ) for d_inp in d_inputs] + [int(_lowercase ), int(_lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) # Synchronize the stream and take time stream.synchronize() # end time UpperCAmelCase : Any = time.time() UpperCAmelCase : Union[str, Any] = end_time - start_time UpperCAmelCase : Union[str, Any] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. a : List[str] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. a : Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. a : Dict = raw_datasets["""validation"""].column_names a : List[Any] = """question""" if """question""" in column_names else column_names[0] a : Dict = """context""" if """context""" in column_names else column_names[1] a : Tuple = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). a : Optional[Any] = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) a : Tuple = min(args.max_seq_length, tokenizer.model_max_length) def __lowerCamelCase ( _lowercase ) -> List[Any]: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace UpperCAmelCase : Dict = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. UpperCAmelCase : Any = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=_lowercase , stride=args.doc_stride , return_overflowing_tokens=_lowercase , return_offsets_mapping=_lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. UpperCAmelCase : str = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. UpperCAmelCase : str = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). UpperCAmelCase : Optional[int] = tokenized_examples.sequence_ids(_lowercase ) UpperCAmelCase : Any = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. UpperCAmelCase : Dict = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. UpperCAmelCase : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples a : List[str] = raw_datasets["""validation"""] # Validation Feature Creation a : Any = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) a : Dict = default_data_collator a : Dict = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) a : Optional[int] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase="eval" ) -> Tuple: # Post-processing: we match the start logits and end logits to answers in the original context. 
UpperCAmelCase : Dict = postprocess_qa_predictions( examples=_lowercase , features=_lowercase , predictions=_lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowercase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: UpperCAmelCase : Union[str, Any] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: UpperCAmelCase : List[Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] UpperCAmelCase : int = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_lowercase , label_ids=_lowercase ) a : Dict = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: return trt.volume(engine.get_binding_shape(_lowercase ) ) * engine.get_binding_dtype(_lowercase ).itemsize # Allocate device memory for inputs and outputs. a : Tuple = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer a : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) a : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) a : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes) a : int = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
a : int = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F''' Num examples = {len(eval_dataset)}''') logger.info(F''' Batch size = {args.per_device_eval_batch_size}''') a : Tuple = 0.0 a : Any = 0 a : Any = timeit.default_timer() a : Dict = None for step, batch in enumerate(eval_dataloader): a , a : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 a , a : Any = outputs a : Union[str, Any] = torch.tensor(start_logits) a : int = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered a : List[str] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0) a : Union[str, Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0) a : Dict = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) a : Optional[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0) if all_preds is not None: a : str = nested_truncate(all_preds, len(eval_dataset)) a : Optional[Any] = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0)) logger.info("""Total Number of Inference = %d""", niter) a : List[str] = post_processing_function(eval_examples, eval_dataset, all_preds) a : int = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'''Evaluation metrics: {eval_metric}''')
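A hedged invocation sketch for the script above; the script name and paths are placeholders, but the flags match the argparse definitions earlier in the file:

# python run_trt_qa_eval.py \             # placeholder script name
#     --onnx_model_path model.onnx \      # placeholder path
#     --output_dir ./trt_out \
#     --tokenizer_name bert-base-uncased \
#     --dataset_name squad \
#     --fp16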
265
265
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a : Dict = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
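What the lazy-module wiring above buys you, sketched: importing the package stays cheap, and the heavy torch/TF submodules load only when an attribute is first touched. (In the unobfuscated source the dict and lists are keyed into a single `_import_structure` mapping passed to `_LazyModule`.)

from transformers import LongformerConfig  # resolved lazily on first access

config = LongformerConfig(attention_window=256)
print(config.attention_window)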
265
'''simple docstring''' from __future__ import annotations import math class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : Optional[int] = size # approximate the overall size of segment tree with given value UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )] UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def _lowercase( self , A ) -> int: return idx * 2 def _lowercase( self , A ) -> int: return idx * 2 + 1 def _lowercase( self , A , A , A , A ) -> None: if left_element == right_element: UpperCAmelCase : str = a[left_element - 1] else: UpperCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(A ) , A , A , A ) self.build(self.right(A ) , mid + 1 , A , A ) UpperCAmelCase : str = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) def _lowercase( self , A , A , A , A , A , A ) -> bool: if self.flag[idx] is True: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : int = False if left_element != right_element: UpperCAmelCase : List[str] = self.lazy[idx] UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase : Optional[Any] = val if left_element != right_element: UpperCAmelCase : Tuple = val UpperCAmelCase : int = val UpperCAmelCase : Any = True UpperCAmelCase : str = True return True UpperCAmelCase : str = (left_element + right_element) // 2 self.update(self.left(A ) , A , A , A , A , A ) self.update(self.right(A ) , mid + 1 , A , A , A , A ) UpperCAmelCase : List[str] = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) return True def _lowercase( self , A , A , A , A , A ) -> int | float: if self.flag[idx] is True: UpperCAmelCase : Any = self.lazy[idx] UpperCAmelCase : Any = False if left_element != right_element: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : Tuple = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase : Dict = (left_element + right_element) // 2 UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A ) UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A ) return max(A , A ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a : Optional[Any] = 1_5 a : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
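Worked expectations for the demo at the bottom of the file (queries are 1-based and inclusive), useful for eyeballing the lazy-propagation logic:

# query(4, 6)   -> max(7, 3, -5)               == 7
# query(7, 11)  -> max(6, 11, -20, 9, 14)      == 14
# query(7, 12)  -> max(6, 11, -20, 9, 14, 15)  == 15
# after update(1, 3, 111): query(1, 15)        -> 111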
265
1
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version a : Dict = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") a : Union[str, Any] = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization a : Union[str, Any] = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } a : int = sorted(arg_to_scheduler.keys()) a : str = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class UpperCamelCase_ ( pl.LightningModule ): def __init__( self , A , A=None , A="base" , A=None , A=None , A=None , **A , ) -> Any: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(A ) UpperCAmelCase : Tuple = 0 UpperCAmelCase : Optional[Any] = Path(self.hparams.output_dir ) UpperCAmelCase : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=A , **A , ) else: UpperCAmelCase : PretrainedConfig = config UpperCAmelCase : Any = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , A , A ): assert hasattr(self.config , A ), f'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , A , getattr(self.hparams , A ) ) if tokenizer is None: UpperCAmelCase : int = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=A , ) else: UpperCAmelCase : PreTrainedTokenizer = tokenizer UpperCAmelCase : Dict = MODEL_MODES[mode] if model is None: UpperCAmelCase : Tuple = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=A , ) else: UpperCAmelCase : Dict = model def _lowercase( self , *A , **A ) -> Optional[Any]: 
UpperCAmelCase : int = self.model_type.from_pretrained(*A , **A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase : List[Any] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase : List[Any] = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = self.model UpperCAmelCase : int = ["""bias""", """LayerNorm.weight"""] UpperCAmelCase : Any = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase : str = Adafactor( A , lr=self.hparams.learning_rate , scale_parameter=A , relative_step=A ) else: UpperCAmelCase : int = AdamW( A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase : Any = optimizer UpperCAmelCase : Optional[int] = self.get_lr_scheduler() return [optimizer], [scheduler] def _lowercase( self , A , A ) -> Union[str, Any]: return self.validation_step(A , A ) def _lowercase( self , A ) -> Optional[Any]: return self.validation_end(A ) def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def _lowercase( self , A ) -> Any: if stage == "test": UpperCAmelCase : List[str] = len(self.test_dataloader().dataset ) else: UpperCAmelCase : Tuple = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=A ) UpperCAmelCase : str = len(self.train_dataloader().dataset ) def _lowercase( self , A , A , A = False ) -> str: raise NotImplementedError("""You must implement this for your task""" ) def _lowercase( self ) -> Optional[Any]: return self.train_loader def _lowercase( self ) -> Optional[int]: return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=A ) def _lowercase( self ) -> int: return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=A ) def _lowercase( self , A ) -> Tuple: return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( A , list(filter(A , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def _lowercase( self , A ) -> None: UpperCAmelCase : int = self.output_dir.joinpath("""best_tfmr""" ) UpperCAmelCase : List[Any] = self.step_count self.model.save_pretrained(A ) self.tokenizer.save_pretrained(A ) @staticmethod def _lowercase( A , A ) -> str: parser.add_argument( """--model_name_or_path""" , default=A , type=A , required=A , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=A , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=A , type=A , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(A ).parent / """test_run""" / """cache""" ) , 
type=A , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=A , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=A , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=A , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=A , help="""Attention dropout probability (Optional). Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5e-5 , type=A , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=A , metavar=A , type=A , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=A , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=A , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=A , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=A , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=A ) parser.add_argument("""--train_batch_size""" , default=32 , type=A ) parser.add_argument("""--eval_batch_size""" , default=32 , type=A ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class UpperCamelCase_ ( pl.Callback ): def _lowercase( self , A , A ) -> Tuple: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class UpperCamelCase_ ( pl.Callback ): def _lowercase( self , A , A ) -> List[Any]: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(A ) class UpperCamelCase_ ( pl.Callback ): def _lowercase( self , A , A ) -> str: UpperCAmelCase : List[str] = trainer.lr_schedulers[0]["""scheduler"""] UpperCAmelCase : str = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(A ) def _lowercase( self , A , A ) -> List[str]: rank_zero_info("""***** Validation results *****""" ) UpperCAmelCase : int = trainer.callback_metrics # Log results for key in sorted(A ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(A , str(metrics[key] ) ) ) def _lowercase( self , A , A ) -> Tuple: rank_zero_info("""***** Test results *****""" ) UpperCAmelCase : int = trainer.callback_metrics # Log and save results to file UpperCAmelCase : Union[str, Any] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(A , """w""" ) as writer: for key in sorted(A ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(A , str(metrics[key] ) ) ) writer.write("""{} = {}\n""".format(A , str(metrics[key] ) ) ) def __lowerCamelCase ( _lowercase , _lowercase ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( """--output_dir""" , default=str(Path(_lowercase ).parent / """test_run""" / """model_checkpoints""" ) , type=_lowercase , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=_lowercase , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=_lowercase ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=_lowercase , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=_lowercase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=_lowercase , default=4_2 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(_lowercase ).parent / """test_run""" / """dummy-train-data""" ) , type=_lowercase , help="""The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.""" , ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=None , _lowercase=True , _lowercase=[] , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]: pl.seed_everything(args.seed ) # init model UpperCAmelCase : Optional[Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowercase ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase : List[str] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowercase ) if logging_callback is None: UpperCAmelCase : List[Any] = LoggingCallback() UpperCAmelCase : Dict = {} if args.fpaa: UpperCAmelCase : List[str] = 1_6 if args.gpus > 1: UpperCAmelCase : str = """auto""" UpperCAmelCase : str = """ddp""" UpperCAmelCase : Any = args.accumulate_grad_batches UpperCAmelCase : List[Any] = None UpperCAmelCase : str = """auto""" UpperCAmelCase : Optional[Any] = pl.Trainer.from_argparse_args( _lowercase , weights_summary=_lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **_lowercase , ) if args.do_train: trainer.fit(_lowercase ) else: print("""RAG modeling tests with new set functions successfuly executed!""" ) return trainer
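Reading the base module above end to end, everything except the data plumbing is generic: a task only has to subclass the LightningModule and supply `get_dataloader` plus a `training_step`. The sketch below illustrates that contract under stated assumptions — `BaseTransformer` and `load_features` are illustrative names standing in for the obfuscated identifiers above, so treat this as an outline rather than the examples' actual code.

import torch
from torch.utils.data import DataLoader, TensorDataset


class SequenceClassifier(BaseTransformer):  # BaseTransformer = the LightningModule above (assumed name)
    mode = "sequence-classification"  # selects AutoModelForSequenceClassification via MODEL_MODES

    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode=self.mode)

    def training_step(self, batch, batch_idx):
        input_ids, attention_mask, labels = batch
        loss = self.model(input_ids, attention_mask=attention_mask, labels=labels).loss
        self.log("train_loss", loss)
        return loss

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        # The base class calls this with "train" / "dev" / "test".
        tensors = load_features(self.hparams.data_dir, type_path)  # hypothetical helper
        return DataLoader(TensorDataset(*tensors), batch_size=batch_size, shuffle=shuffle,
                          num_workers=self.hparams.num_workers)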
265
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 1_2_8 + level + (c - 1_2_8)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
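For comparison, Pillow also ships a built-in brightness control; a short sketch using the stock `PIL.ImageEnhance` API, which applies a multiplicative factor rather than the additive offset above:

from PIL import Image, ImageEnhance

with Image.open("image_data/lena.jpg") as img:
    # enhance(1.0) returns the original image; 0.0 gives solid black.
    brighter = ImageEnhance.Brightness(img).enhance(1.5)
    brighter.save("image_data/lena_enhanced.png", format="png")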
265
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ ( __magic_name__ ): lowercase = (UniPCMultistepScheduler,) lowercase = (('num_inference_steps', 25),) def _lowercase( self , **A ) -> int: UpperCAmelCase : int = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**A ) return config def _lowercase( self , A=0 , **A ) -> Tuple: UpperCAmelCase : int = dict(self.forward_default_kwargs ) UpperCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" , A ) UpperCAmelCase : Optional[int] = self.dummy_sample UpperCAmelCase : Optional[Any] = 0.1 * sample UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase : Any = self.get_scheduler_config(**A ) UpperCAmelCase : int = scheduler_class(**A ) scheduler.set_timesteps(A ) # copy over dummy past residuals UpperCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A ) UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(A ) new_scheduler.set_timesteps(A ) # copy over dummy past residuals UpperCAmelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase , UpperCAmelCase : List[Any] = sample, sample for t in range(A , time_step + scheduler.config.solver_order + 1 ): UpperCAmelCase : Any = scheduler.step(A , A , A , **A ).prev_sample UpperCAmelCase : Dict = new_scheduler.step(A , A , A , **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase( self , A=0 , **A ) -> List[str]: UpperCAmelCase : List[str] = dict(self.forward_default_kwargs ) UpperCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" , A ) UpperCAmelCase : Any = self.dummy_sample UpperCAmelCase : Dict = 0.1 * sample UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: UpperCAmelCase : List[Any] = self.get_scheduler_config() UpperCAmelCase : Any = scheduler_class(**A ) scheduler.set_timesteps(A ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(A ) UpperCAmelCase : int = scheduler_class.from_pretrained(A ) # copy over dummy past residuals new_scheduler.set_timesteps(A ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase : Dict = scheduler.step(A , A , A , **A ).prev_sample UpperCAmelCase : List[Any] = new_scheduler.step(A , A , A , **A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase( self , A=None , **A ) -> Dict: if scheduler is None: UpperCAmelCase : List[str] = self.scheduler_classes[0] UpperCAmelCase : Optional[Any] = self.get_scheduler_config(**A ) UpperCAmelCase : Union[str, Any] = scheduler_class(**A ) UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase : Union[str, Any] = 
self.get_scheduler_config(**A ) UpperCAmelCase : Tuple = scheduler_class(**A ) UpperCAmelCase : Optional[Any] = 10 UpperCAmelCase : Optional[Any] = self.dummy_model() UpperCAmelCase : Tuple = self.dummy_sample_deter scheduler.set_timesteps(A ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Dict = model(A , A ) UpperCAmelCase : Union[str, Any] = scheduler.step(A , A , A ).prev_sample return sample def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase : Dict = kwargs.pop("""num_inference_steps""" , A ) for scheduler_class in self.scheduler_classes: UpperCAmelCase : List[str] = self.get_scheduler_config() UpperCAmelCase : Tuple = scheduler_class(**A ) UpperCAmelCase : Optional[Any] = self.dummy_sample UpperCAmelCase : Any = 0.1 * sample if num_inference_steps is not None and hasattr(A , """set_timesteps""" ): scheduler.set_timesteps(A ) elif num_inference_steps is not None and not hasattr(A , """set_timesteps""" ): UpperCAmelCase : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] UpperCAmelCase : str = scheduler.timesteps[5] UpperCAmelCase : str = scheduler.timesteps[6] UpperCAmelCase : Optional[int] = scheduler.step(A , A , A , **A ).prev_sample UpperCAmelCase : Optional[int] = scheduler.step(A , A , A , **A ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults UpperCAmelCase : List[str] = UniPCMultistepScheduler(**self.get_scheduler_config() ) UpperCAmelCase : Union[str, Any] = self.full_loop(scheduler=A ) UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 UpperCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config ) UpperCAmelCase : Tuple = DEISMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : str = self.full_loop(scheduler=A ) UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def _lowercase( self ) -> Tuple: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=A ) def _lowercase( self ) -> Dict: self.check_over_configs(thresholding=A ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=A , prediction_type=A , sample_max_value=A , solver_order=A , solver_type=A , ) def _lowercase( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A ) def _lowercase( self ) -> Dict: for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=A , solver_type=A , prediction_type=A , ) UpperCAmelCase : Optional[int] = self.full_loop( solver_order=A , solver_type=A , prediction_type=A , ) assert not torch.isnan(A ).any(), "Samples have nan numbers" def _lowercase( self ) -> Dict: 
self.check_over_configs(lower_order_final=A ) self.check_over_configs(lower_order_final=A ) def _lowercase( self ) -> Optional[Any]: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=A , time_step=0 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = self.full_loop() UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Tuple = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3 def _lowercase( self ) -> List[Any]: UpperCAmelCase : Dict = self.scheduler_classes[0] UpperCAmelCase : Dict = self.get_scheduler_config(thresholding=A , dynamic_thresholding_ratio=0 ) UpperCAmelCase : Dict = scheduler_class(**A ) UpperCAmelCase : Optional[int] = 10 UpperCAmelCase : List[str] = self.dummy_model() UpperCAmelCase : List[Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(A ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Tuple = model(A , A ) UpperCAmelCase : int = scheduler.step(A , A , A ).prev_sample assert sample.dtype == torch.floataa def _lowercase( self , **A ) -> Optional[int]: for scheduler_class in self.scheduler_classes: UpperCAmelCase : int = self.get_scheduler_config(**A ) UpperCAmelCase : Optional[Any] = scheduler_class(**A ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
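The `from_config` hops exercised by the test above work because diffusers schedulers share one config schema, so one solver family can be instantiated from another's config. A minimal standalone sketch of that interchange (constructor kwargs mirror the test's config; printing the timesteps is just for inspection, and the "bh2" remapping is an assumption backed only by the round-trip the test performs):

from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

unipc = UniPCMultistepScheduler(
    num_train_timesteps=1000, beta_schedule="linear", solver_order=2, solver_type="bh2"
)
# A different solver family built from the same config; diffusers maps UniPC's
# "bh2" solver_type onto a setting it understands.
dpm = DPMSolverMultistepScheduler.from_config(unipc.config)

unipc.set_timesteps(10)
dpm.set_timesteps(10)
print(unipc.timesteps)
print(dpm.timesteps)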
265
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
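For reference, the scaling dict built in the parameterized test corresponds to the public `rope_scaling` config field. A sketch of the same knob in user code, assuming a transformers release where GPT-NeoX supports it:

from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(hidden_size=64, num_attention_heads=8, num_hidden_layers=5)
# "linear" rescales every position; "dynamic" leaves inputs shorter than
# max_position_embeddings untouched, which is exactly what the test asserts.
config.rope_scaling = {"type": "dynamic", "factor": 10.0}
model = GPTNeoXModel(config).eval()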
265
1
'''simple docstring'''
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')


if __name__ == "__main__":
    main()
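As a quick sanity check on the rule, a sketch assuming `method_a` and `f` above are in scope: the approximation should converge to the exact integral of x² over [0, 1], which is 1/3.

exact = 1.0 / 3.0
for steps in (10.0, 100.0, 1000.0):
    approx = method_a([0.0, 1.0], steps)
    # for a smooth integrand the trapezoidal error shrinks like O(h^2)
    print(f"steps={steps:g}  approx={approx:.6f}  error={abs(approx - exact):.2e}")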
265
'''simple docstring'''
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
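Note that the recursion above evaluates `actual_power(a, b // 2)` twice per level, so it still performs O(|b|) multiplications. A sketch of the standard iterative alternative (my own variant, not part of the snippet) that squares the base once per exponent bit:

def fast_power(a: int, b: int) -> float:
    # Iterative exponentiation by squaring: O(log |b|) multiplications.
    result, base, exp = 1, a, abs(b)
    while exp:
        if exp & 1:  # fold the base into the result on set exponent bits
            result *= base
        base *= base
        exp >>= 1
    return 1 / result if b < 0 else result


print(fast_power(-2, -3))  # -0.125, matching power(-2, -3)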
265
1
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def __lowerCamelCase ( _lowercase , _lowercase=False ) -> str: try: UpperCAmelCase : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCAmelCase : str = default else: # KEY is set, convert it to True or False. try: UpperCAmelCase : Optional[int] = strtobool(_lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a : Union[str, Any] = parse_flag_from_env("""RUN_SLOW""", default=False) a : List[str] = parse_flag_from_env("""RUN_REMOTE""", default=False) a : Optional[Any] = parse_flag_from_env("""RUN_LOCAL""", default=True) a : List[Any] = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression a : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") a : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") a : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio a : Optional[int] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam a : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility a : Union[str, Any] = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows a : List[Any] = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def __lowerCamelCase ( _lowercase ) -> int: try: import faiss # noqa except ImportError: UpperCAmelCase : Dict = unittest.skip("""test requires faiss""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Optional[Any]: try: import regex # noqa except ImportError: UpperCAmelCase : Optional[int] = unittest.skip("""test requires regex""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> int: try: import elasticsearch # noqa except ImportError: UpperCAmelCase : Union[str, Any] = unittest.skip("""test requires elasticsearch""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Optional[int]: try: import sqlalchemy # noqa except ImportError: UpperCAmelCase : Tuple = unittest.skip("""test requires sqlalchemy""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Optional[int]: if not config.TORCH_AVAILABLE: UpperCAmelCase : Optional[int] = unittest.skip("""test requires PyTorch""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Optional[Any]: if not config.TF_AVAILABLE: UpperCAmelCase : Tuple = 
unittest.skip("""test requires TensorFlow""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: if not config.JAX_AVAILABLE: UpperCAmelCase : Tuple = unittest.skip("""test requires JAX""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Dict: if not config.PIL_AVAILABLE: UpperCAmelCase : Tuple = unittest.skip("""test requires Pillow""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Tuple: try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(_lowercase ) else: return test_case def __lowerCamelCase ( _lowercase ) -> str: try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(_lowercase ) else: return test_case def __lowerCamelCase ( _lowercase ) -> Optional[Any]: try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(_lowercase ) else: return test_case def __lowerCamelCase ( _lowercase ) -> Optional[int]: def _require_spacy_model(_lowercase ): try: import spacy # noqa F401 spacy.load(_lowercase ) except ImportError: return unittest.skip("""test requires spacy""" )(_lowercase ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(_lowercase ) )(_lowercase ) else: return test_case return _require_spacy_model def __lowerCamelCase ( _lowercase ) -> Any: try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(_lowercase ) else: return test_case def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(_lowercase ) else: return test_case def __lowerCamelCase ( _lowercase ) -> Any: if not _run_slow_tests or _run_slow_tests == 0: UpperCAmelCase : Optional[int] = unittest.skip("""test is slow""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> int: if not _run_local_tests or _run_local_tests == 0: UpperCAmelCase : Dict = unittest.skip("""test is local""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Optional[Any]: if not _run_packaged_tests or _run_packaged_tests == 0: UpperCAmelCase : Dict = unittest.skip("""test is packaged""" )(_lowercase ) return test_case def __lowerCamelCase ( _lowercase ) -> Dict: if not _run_remote_tests or _run_remote_tests == 0: UpperCAmelCase : Union[str, Any] = unittest.skip("""test requires remote""" )(_lowercase ) return test_case def __lowerCamelCase ( *_lowercase ) -> Dict: def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_lowercase ) and name.startswith("""test""" ): for decorator in decorators: UpperCAmelCase : Any = decorator(_lowercase ) setattr(cls , _lowercase , _lowercase ) return cls return decorate class UpperCamelCase_ ( __magic_name__ ): pass class UpperCamelCase_ ( __magic_name__ ): lowercase = 0 lowercase = 1 lowercase = 2 @contextmanager def __lowerCamelCase ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS , _lowercase=1e-16 ) -> str: UpperCAmelCase : str = requests.Session().request def timeout_request(_lowercase , _lowercase , _lowercase , **_lowercase ): # Change the url to an invalid url so that the connection hangs UpperCAmelCase : str = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.''' ) UpperCAmelCase : Optional[Any] = timeout try: return online_request(_lowercase , _lowercase , **_lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier UpperCAmelCase : Any = url UpperCAmelCase : Tuple = e.args[0] UpperCAmelCase : List[str] = (max_retry_error.args[0].replace("""10.255.255.1""" , F'''OfflineMock[{url}]''' ),) UpperCAmelCase : Optional[Any] = (max_retry_error,) raise def raise_connection_error(_lowercase , _lowercase , **_lowercase ): raise requests.ConnectionError("""Offline mode is enabled.""" , request=_lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" , _lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" , _lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowercase ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def __lowerCamelCase ( *_lowercase , **_lowercase ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_lowercase , **_lowercase ) as tmp_dir: try: os.chdir(_lowercase ) yield finally: os.chdir(_lowercase ) @contextmanager def __lowerCamelCase ( ) -> str: import gc gc.collect() UpperCAmelCase : Any = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __lowerCamelCase ( ) -> Dict: import gc gc.collect() UpperCAmelCase : Optional[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: return deepcopy(_lowercase ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_lowercase ).integers(0 , 1_0_0 , 1_0 ).tolist() def __lowerCamelCase ( _lowercase ) -> Dict: import decorator from requests.exceptions import HTTPError def _wrapper(_lowercase , *_lowercase , **_lowercase ): try: return func(*_lowercase , **_lowercase ) except HTTPError as err: if str(_lowercase ).startswith("""500""" ) or str(_lowercase ).startswith("""502""" ): pytest.xfail(str(_lowercase ) ) raise err return decorator.decorator(_wrapper , _lowercase ) class UpperCamelCase_ : def __init__( self , A , A , A ) -> Any: UpperCAmelCase : Optional[Any] = returncode UpperCAmelCase : Optional[Any] = stdout UpperCAmelCase : Optional[Any] = stderr async def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: while True: UpperCAmelCase : Dict = await stream.readline() if line: callback(_lowercase ) else: break async def __lowerCamelCase ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False ) -> _RunOutput: if echo: print("""\nRunning: """ , """ """.join(_lowercase ) ) UpperCAmelCase : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCAmelCase : Optional[int] = [] UpperCAmelCase : Union[str, Any] = [] def tee(_lowercase , _lowercase , _lowercase , _lowercase="" ): UpperCAmelCase : int = line.decode("""utf-8""" ).rstrip() sink.append(_lowercase ) if not quiet: print(_lowercase , _lowercase , file=_lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowercase : tee(_lowercase , _lowercase , sys.stdout , label="""stdout:""" ) ), _read_stream(p.stderr , lambda _lowercase : tee(_lowercase , _lowercase , sys.stderr , label="""stderr:""" ) ), ] , timeout=_lowercase , ) return _RunOutput(await p.wait() , _lowercase , _lowercase ) def __lowerCamelCase ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=1_8_0 , _lowercase=False , _lowercase=True ) -> _RunOutput: UpperCAmelCase : List[str] = asyncio.get_event_loop() UpperCAmelCase : Any = loop.run_until_complete( _stream_subprocess(_lowercase , env=_lowercase , stdin=_lowercase , timeout=_lowercase , quiet=_lowercase , echo=_lowercase ) ) UpperCAmelCase : Dict = """ """.join(_lowercase ) if result.returncode > 0: UpperCAmelCase : int = """\n""".join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def __lowerCamelCase ( ) -> Optional[int]: UpperCAmelCase : Dict = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" ) UpperCAmelCase : Union[str, Any] = re.sub(R"""^gw""" , """""" , _lowercase , 0 , re.M ) return int(_lowercase ) def __lowerCamelCase ( ) -> List[Any]: UpperCAmelCase : List[str] = 2_9_5_0_0 UpperCAmelCase : Union[str, Any] = pytest_xdist_worker_id() return port + uniq_delta
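For orientation, the decorators and context managers above are meant to be composed inside test modules. A sketch of typical usage; the names `require_faiss`, `offline`, and `OfflineSimulationMode` assume the un-obfuscated originals:

import requests


@require_faiss  # auto-skips when faiss is not installed
def test_builds_faiss_index():
    ...


def test_fails_cleanly_offline():
    # Inside the block every requests.Session.send raises ConnectionError,
    # matching OfflineSimulationMode.CONNECTION_FAILS above.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co", timeout=1.0)
        except requests.ConnectionError:
            pass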
265
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
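The sequence-builder assertions above pin down ALBERT's canonical layouts: `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair. A small sketch that makes the pattern visible on the real checkpoint (network access assumed):

from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
text_a = tok.encode("sequence builders", add_special_tokens=False)
text_b = tok.encode("multi-sequence build", add_special_tokens=False)

single = tok.build_inputs_with_special_tokens(text_a)
pair = tok.build_inputs_with_special_tokens(text_a, text_b)

assert single == [tok.cls_token_id] + text_a + [tok.sep_token_id]
assert pair == [tok.cls_token_id] + text_a + [tok.sep_token_id] + text_b + [tok.sep_token_id]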
265
1
'''simple docstring'''
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0_0_0_0
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 1_0_0_0_0
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n)
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
    parser.add_argument(
        """-f""",
        """--File""",
        type=str,
        help="""Path to the file containing the data""",
        required=True,
    )
    parser.add_argument(
        """-i""",
        """--Iterations""",
        type=int,
        help="""How many iterations the algorithm should perform""",
        required=True,
    )
    parser.add_argument(
        """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
265
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
265
1
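A usage sketch for the tabu-search sample above, assuming the restored names generate_neighbours, generate_first_solution and tabu_search. The edge-list layout ("u v distance", one per line) is inferred from the parser, and the tour starts at the file's first character; the four-city graph below is a hypothetical toy instance.

# A minimal sketch, assuming the restored function names above.
import tempfile

edges = "a b 20\na c 18\na d 22\nb c 30\nb d 31\nc d 25\n"

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(edges)
    path = tmp.name

neighbours = generate_neighbours(path)
tour, cost = generate_first_solution(path, neighbours)  # greedy tour: a-c-d-b-a
best_tour, best_cost = tabu_search(tour, cost, neighbours, iters=10, size=3)
print(best_tour, best_cost)  # best_solution_ever never worsens, so best_cost <= cost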
'''simple docstring'''

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
265
1
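The NLLB-MoE converter above finishes by writing a transformers-style shard index: a weight_map from tensor name to shard file, plus total_size metadata. A hedged sketch of consuming that index, assuming the default pytorch_model.bin.index.json filename and a hypothetical local output directory:

# A minimal sketch; `ckpt_dir` is a hypothetical converted-checkpoint directory.
import json
import os

import torch

ckpt_dir = "hf-converted-moe"
with open(os.path.join(ckpt_dir, "pytorch_model.bin.index.json")) as f:
    index = json.load(f)

print("total parameter bytes:", index["metadata"]["total_size"])

# `weight_map` tells us which shard holds each tensor, so one weight can be
# loaded without reading every shard file.
key = next(iter(index["weight_map"]))  # any key present in the map
shard_file = index["weight_map"][key]
state_dict = torch.load(os.path.join(ckpt_dir, shard_file), map_location="cpu")
print(key, tuple(state_dict[key].shape))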
'''simple docstring'''

from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count square laminae (hollow square rings) that use at most `limit` tiles.

    A lamina with outer width o and hole width h uses o**2 - h**2 tiles,
    where 1 <= h <= o - 2 and h has the same parity as o.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Smallest hole that keeps the tile count within the limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1  # match the parity of the outer width
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
265
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
265
1
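The Project Euler 173 sample above counts square laminae arithmetically: a ring with outer width o and hole width h uses o**2 - h**2 tiles, where 1 <= h <= o - 2 and h shares o's parity. A brute-force cross-check (small limits only) makes the closed-form count easier to trust; this assumes the restored `solution` name from the sample above.

# Brute-force cross-check for the lamina count, small limits only.
def solution_bruteforce(limit: int) -> int:
    count = 0
    for o in range(3, limit):
        if o * o - (o - 2) * (o - 2) > limit:  # even the thinnest ring is too big
            break
        for h in range(o - 2, 0, -2):  # same parity as o, ring at least one tile wide
            if o * o - h * h > limit:
                break
            count += 1
    return count


# e.g. limit=100: rings such as o=5, h=3 (16 tiles) are counted once each.
assert solution_bruteforce(100) == solution(100)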
'''simple docstring'''


def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
265
'''simple docstring'''

import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
265
1
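Since the self-powers sample above only needs the last ten digits, the same sum can be taken modulo 10**10 with three-argument pow, so the huge intermediate powers are never materialised. An equivalent sketch, assuming the restored `solution` name from the sample above:

# Equivalent computation mod 10**10; zfill keeps any leading zeros of the
# last ten digits.
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)[-10:]


assert solution_mod() == solution()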
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
1
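Both __init__ samples above register a _LazyModule so that importing the package does not pull in torch until an exported name is actually used. A generic standard-library sketch of the same idea follows; it is an illustration of why sys.modules[__name__] gets replaced, not transformers' actual implementation.

# A minimal sketch of a lazy module using only the standard library.
import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an exported name is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            f".{self._name_to_module[attr]}", self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value


# In a package __init__.py one would then do, mirroring the files above:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)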
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a : Optional[Any] = """base_with_context""" def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) UpperCAmelCase : int = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase : Union[str, Any] = weights[F'''layers_{lyr_num}'''] UpperCAmelCase : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : List[Any] = ly_weight["""attention"""] UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) UpperCAmelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase : List[str] = weights[F'''layers_{lyr_num}'''] UpperCAmelCase : Any = ly_weight["""attention"""] UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) UpperCAmelCase : Tuple = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : Union[str, Any] = 
nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) UpperCAmelCase : Dict = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowercase ) UpperCAmelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): UpperCAmelCase : Union[str, Any] = weights[F'''layers_{lyr_num}'''] UpperCAmelCase : List[str] = nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : int = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[int] = ly_weight["""self_attention"""] UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) UpperCAmelCase : List[Any] = ly_weight["""MultiHeadDotProductAttention_0"""] UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) UpperCAmelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) UpperCAmelCase : List[str] = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) UpperCAmelCase : List[str] = jnp.tree_util.tree_map(onp.array , _lowercase ) UpperCAmelCase : Optional[int] = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] UpperCAmelCase : int = 
os.path.join(args.checkpoint_path , """..""" , """config.gin""" ) UpperCAmelCase : Dict = inference.parse_training_gin_file(_lowercase , _lowercase ) UpperCAmelCase : List[Any] = inference.InferenceModel(args.checkpoint_path , _lowercase ) UpperCAmelCase : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) UpperCAmelCase : Optional[int] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) UpperCAmelCase : Union[str, Any] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) UpperCAmelCase : Optional[int] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) UpperCAmelCase : Optional[Any] = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , _lowercase ) UpperCAmelCase : Tuple = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , _lowercase ) UpperCAmelCase : str = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , _lowercase ) UpperCAmelCase : Optional[Any] = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) UpperCAmelCase : Tuple = SpectrogramDiffusionPipeline( notes_encoder=_lowercase , continuous_encoder=_lowercase , decoder=_lowercase , scheduler=_lowercase , melgan=_lowercase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) a : List[Any] = parser.parse_args() main(args)
265
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
265
1
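A recurring move in the spectrogram-diffusion converter above is nn.Parameter(torch.FloatTensor(weights["kernel"].T)): Flax Dense kernels are stored as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features). A small self-contained check of that equivalence, with random data standing in for real weights:

# Why every Dense kernel above is transposed: torch computes y = x @ W.T,
# so W must be kernel.T for y = x @ kernel to be reproduced.
import numpy as np
import torch
from torch import nn

in_features, out_features = 3, 5
kernel = np.random.randn(in_features, out_features).astype(np.float32)

linear = nn.Linear(in_features, out_features, bias=False)
linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))  # same move as the converter

x = np.random.randn(2, in_features).astype(np.float32)
expected = x @ kernel  # Flax-style Dense output
actual = linear(torch.from_numpy(x)).detach().numpy()
assert np.allclose(expected, actual, atol=1e-6)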
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        latents_reference: Optional[torch.FloatTensor] = None,  # required when `latents` is passed
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # copy the reference noise into the center of the target latents so that
        # the same seed produces similar images at different resolutions
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
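# A minimal usage sketch, assuming this module is exposed as the diffusers
# community pipeline "seed_resize_stable_diffusion"; the checkpoint name,
# prompt, and target size below are illustrative assumptions.
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="seed_resize_stable_diffusion",
    ).to("cuda")
    gen = torch.Generator(device="cuda").manual_seed(0)
    out = pipe("a photo of an astronaut riding a horse", height=512, width=768, generator=gen)
    out.images[0].save("astronaut_768.png")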
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
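# A hedged usage sketch of the pipeline exercised above; the checkpoint
# "facebook/bart-large-mnli" is an assumed full-size example rather than the
# tiny test model used in these tests.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    print(result["labels"][0], round(result["scores"][0], 3))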
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , A , A=7 , A=3 , A=30 , A=400 , A=True , A=None , A=True , A=1 / 255 , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase : Optional[int] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : List[Any] = num_channels UpperCAmelCase : Dict = min_resolution UpperCAmelCase : List[str] = max_resolution UpperCAmelCase : Optional[int] = do_resize UpperCAmelCase : Dict = size UpperCAmelCase : Union[str, Any] = do_rescale UpperCAmelCase : List[str] = rescale_factor UpperCAmelCase : List[str] = do_normalize UpperCAmelCase : Optional[int] = image_mean UpperCAmelCase : List[Any] = image_std UpperCAmelCase : Dict = do_pad def _lowercase( self ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def _lowercase( self , A , A=False ) -> Any: if not batched: UpperCAmelCase : Optional[int] = image_inputs[0] if isinstance(A , Image.Image ): UpperCAmelCase , UpperCAmelCase : Tuple = image.size else: UpperCAmelCase , UpperCAmelCase : Optional[int] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase : Dict = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase : Optional[Any] = self.size["""shortest_edge"""] UpperCAmelCase : Optional[Any] = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase : Optional[int] = self.size["""shortest_edge"""] UpperCAmelCase : int = self.size["""shortest_edge"""] else: UpperCAmelCase : Optional[Any] = [] for image in image_inputs: UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase : Tuple = max(A , key=lambda A : item[0] )[0] UpperCAmelCase : List[str] = max(A , key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = DetrImageProcessor if is_vision_available() else None def _lowercase( self ) -> Tuple: UpperCAmelCase : Dict = DetrImageProcessingTester(self ) @property def _lowercase( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def _lowercase( self ) -> Any: UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , """image_mean""" ) ) self.assertTrue(hasattr(A , """image_std""" ) ) self.assertTrue(hasattr(A , """do_normalize""" ) ) self.assertTrue(hasattr(A , """do_rescale""" ) ) self.assertTrue(hasattr(A , """rescale_factor""" ) ) self.assertTrue(hasattr(A , """do_resize""" ) ) self.assertTrue(hasattr(A , 
"""size""" ) ) self.assertTrue(hasattr(A , """do_pad""" ) ) def _lowercase( self ) -> int: UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , A ) UpperCAmelCase : Tuple = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , A ) def _lowercase( self ) -> List[Any]: pass def _lowercase( self ) -> str: # Initialize image_processing UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A , Image.Image ) # Test not batched input UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A , batched=A ) UpperCAmelCase : str = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase( self ) -> Tuple: # Initialize image_processing UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for image in image_inputs: self.assertIsInstance(A , np.ndarray ) # Test not batched input UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase : Optional[Any] = image_processing(A , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(A , batched=A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase( self ) -> Any: # Initialize image_processing UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for image in image_inputs: self.assertIsInstance(A , torch.Tensor ) # Test not batched input UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Tuple = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase : Union[str, Any] = 
image_processing(A , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A , batched=A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase( self ) -> Any: # prepare image and target UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: UpperCAmelCase : Union[str, Any] = json.loads(f.read() ) UpperCAmelCase : int = {"""image_id""": 39769, """annotations""": target} # encode them UpperCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) UpperCAmelCase : Union[str, Any] = image_processing(images=A , annotations=A , return_tensors="""pt""" ) # verify pixel values UpperCAmelCase : List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , A ) UpperCAmelCase : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) ) # verify area UpperCAmelCase : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) ) # verify boxes UpperCAmelCase : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A ) UpperCAmelCase : Union[str, Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) ) # verify image_id UpperCAmelCase : str = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) ) # verify is_crowd UpperCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) ) # verify class_labels UpperCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) ) # verify orig_size UpperCAmelCase : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) ) # verify size UpperCAmelCase : List[str] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) ) @slow def _lowercase( self ) -> List[str]: # prepare image, target and masks_path UpperCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: UpperCAmelCase : Union[str, Any] = json.loads(f.read() ) UpperCAmelCase : str = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} UpperCAmelCase : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them UpperCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) UpperCAmelCase : Optional[int] = image_processing(images=A , annotations=A , masks_path=A , return_tensors="""pt""" ) # verify pixel values UpperCAmelCase : int = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , A ) UpperCAmelCase : str = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) ) # verify area UpperCAmelCase : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) ) # verify boxes UpperCAmelCase : Any = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A ) UpperCAmelCase : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) ) # verify image_id UpperCAmelCase : Dict = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) ) # verify is_crowd UpperCAmelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) ) # verify class_labels UpperCAmelCase : Dict = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) ) # verify masks UpperCAmelCase : Union[str, Any] = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A ) # verify orig_size UpperCAmelCase : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) ) # verify size UpperCAmelCase : int = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
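# A hedged sketch of running the image processor outside the test harness;
# the checkpoint and image path mirror the fixtures used in the slow tests above.
if __name__ == "__main__":
    processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    enc = processor(images=img, return_tensors="pt")
    print(enc["pixel_values"].shape)  # expected: torch.Size([1, 3, 800, 1066])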
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder

CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
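# A hedged sketch of how a test module might consume these fixtures; the
# `load_dataset` call and the split name below are assumptions for illustration.
#
# def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
#     from datasets import load_dataset
#
#     ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
#     assert "train" in ds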
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , A , A=7 , A=3 , A=30 , A=400 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 255 , A=True , ) -> Optional[Any]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase : Optional[int] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : Any = num_channels UpperCAmelCase : int = min_resolution UpperCAmelCase : List[Any] = max_resolution UpperCAmelCase : int = do_resize UpperCAmelCase : Union[str, Any] = size UpperCAmelCase : Any = do_normalize UpperCAmelCase : str = image_mean UpperCAmelCase : Any = image_std UpperCAmelCase : Union[str, Any] = do_rescale UpperCAmelCase : List[str] = rescale_factor UpperCAmelCase : List[Any] = do_pad def _lowercase( self ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase( self , A , A=False ) -> Dict: if not batched: UpperCAmelCase : str = image_inputs[0] if isinstance(A , Image.Image ): UpperCAmelCase , UpperCAmelCase : Tuple = image.size else: UpperCAmelCase , UpperCAmelCase : str = image.shape[1], image.shape[2] if w < h: UpperCAmelCase : Tuple = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase : Any = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase : Tuple = self.size["""shortest_edge"""] UpperCAmelCase : int = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase : Any = self.size["""shortest_edge"""] UpperCAmelCase : Optional[int] = self.size["""shortest_edge"""] else: UpperCAmelCase : int = [] for image in image_inputs: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase : List[Any] = max(A , key=lambda A : item[0] )[0] UpperCAmelCase : Dict = max(A , key=lambda A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = DetaImageProcessor if is_vision_available() else None def _lowercase( self ) -> Any: UpperCAmelCase : str = DetaImageProcessingTester(self ) @property def _lowercase( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , """image_mean""" ) ) self.assertTrue(hasattr(A , """image_std""" ) ) self.assertTrue(hasattr(A , """do_normalize""" ) ) self.assertTrue(hasattr(A , """do_resize""" ) ) self.assertTrue(hasattr(A , """do_rescale""" ) ) self.assertTrue(hasattr(A , """do_pad""" ) ) self.assertTrue(hasattr(A , """size""" ) ) def _lowercase( self ) -> Tuple: UpperCAmelCase : 
str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , A ) def _lowercase( self ) -> int: pass def _lowercase( self ) -> Dict: # Initialize image_processing UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A , Image.Image ) # Test not batched input UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Any = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase , UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A , batched=A ) UpperCAmelCase : Union[str, Any] = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase( self ) -> List[Any]: # Initialize image_processing UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for image in image_inputs: self.assertIsInstance(A , np.ndarray ) # Test not batched input UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase : List[str] = image_processing(A , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase( self ) -> str: # Initialize image_processing UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for image in image_inputs: self.assertIsInstance(A , torch.Tensor ) # Test not batched input UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : int = self.image_processor_tester.get_expected_values(A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase : Dict = image_processing(A , return_tensors="""pt""" ).pixel_values UpperCAmelCase , UpperCAmelCase : Dict = self.image_processor_tester.get_expected_values(A , batched=A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase( self ) -> List[str]: # prepare image and target 
UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: UpperCAmelCase : str = json.loads(f.read() ) UpperCAmelCase : List[str] = {"""image_id""": 39769, """annotations""": target} # encode them UpperCAmelCase : Union[str, Any] = DetaImageProcessor() UpperCAmelCase : Tuple = image_processing(images=A , annotations=A , return_tensors="""pt""" ) # verify pixel values UpperCAmelCase : List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , A ) UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) ) # verify area UpperCAmelCase : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) ) # verify boxes UpperCAmelCase : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A ) UpperCAmelCase : str = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) ) # verify image_id UpperCAmelCase : Tuple = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) ) # verify is_crowd UpperCAmelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) ) # verify class_labels UpperCAmelCase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) ) # verify orig_size UpperCAmelCase : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) ) # verify size UpperCAmelCase : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) ) @slow def _lowercase( self ) -> int: # prepare image, target and masks_path UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: UpperCAmelCase : Any = json.loads(f.read() ) UpperCAmelCase : int = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} UpperCAmelCase : Union[str, Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them UpperCAmelCase : Optional[Any] = DetaImageProcessor(format="""coco_panoptic""" ) UpperCAmelCase : Union[str, Any] = image_processing(images=A , annotations=A , masks_path=A , return_tensors="""pt""" ) # verify pixel values UpperCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , A ) UpperCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) ) # verify area UpperCAmelCase : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) ) # verify boxes UpperCAmelCase : List[Any] = torch.Size([6, 4] ) 
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A ) UpperCAmelCase : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) ) # verify image_id UpperCAmelCase : Dict = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) ) # verify is_crowd UpperCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) ) # verify class_labels UpperCAmelCase : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) ) # verify masks UpperCAmelCase : Any = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A ) # verify orig_size UpperCAmelCase : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) ) # verify size UpperCAmelCase : Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
'''simple docstring'''
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
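# A minimal usage sketch, assuming a CLIP-style checkpoint; the model name,
# image URL, and candidate labels are illustrative assumptions.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    preds = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["two cats", "a dog", "an airplane"],
    )
    print(preds[0]["label"], round(preds[0]["score"], 3))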
'''simple docstring'''
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # vectorized: sqrt of the sum of squared component-wise differences
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # pure-Python equivalent of `euclidean_distance`
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
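# A quick worked example (assumed values): both implementations agree on the
# 3-4-5 right triangle, where the distance between (0, 0) and (3, 4) is 5.0.
if __name__ == "__main__":
    assert euclidean_distance((0, 0), (3, 4)) == 5.0
    assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0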
'''simple docstring'''
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
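# A hedged sketch of exercising HashMap directly, mirroring the dict-parity
# checks performed by the parametrized test above.
if __name__ == "__main__":
    demo = HashMap(initial_block_size=4)
    demo["key_a"] = "val_a"
    demo["key_b"] = "val_b"
    del demo["key_a"]
    assert len(demo) == 1 and demo["key_b"] == "val_b"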
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit` (Project Euler problem 1)."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
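# A quick sanity check on a small, well-known case: below 10 the multiples of
# 3 or 5 are 3, 5, 6 and 9, which sum to 23.
if __name__ == "__main__":
    assert solution(10) == 23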
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
265
1
"""Behavioural tests: HashMap must act exactly like the built-in dict."""

from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
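To make the operation-table encoding concrete, here is a tiny standalone walkthrough of `_run_operation` driving a plain dict (illustrative only, not part of the test module):

ops = [_set("k", 1), _get("k"), _del("k"), _get("k")]
d: dict = {}
for fun, *args in ops:
    res, exc = _run_operation(d, fun, *args)
    print(fun.__name__, args, "->", res if exc is None else type(exc).__name__)
# setitem ['k', 1] -> None
# getitem ['k'] -> 1
# delitem ['k'] -> None
# getitem ['k'] -> KeyError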
265
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
1
"""FocalNet model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
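For orientation, a config like this is typically instantiated either with defaults or as a backbone with selected feature stages; a minimal sketch, assuming the class matches transformers' FocalNetConfig:

config = FocalNetConfig(out_features=["stage1", "stage2"])
print(config.model_type)    # "focalnet"
print(config.stage_names)   # ["stem", "stage1", "stage2", "stage3", "stage4"]
print(config.out_features)  # ["stage1", "stage2"]  (exposed via BackboneConfigMixin)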
265
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
1
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging a : Optional[Any] = logging.get_logger(__name__) class UpperCamelCase_ ( __magic_name__ ): lowercase = ['audio_values', 'audio_mask'] def __init__( self , A=2048 , A=1 , A=[16, 16] , A=128 , A=44100 , A=86 , A=2048 , A=0.0 , **A , ) -> int: super().__init__( feature_size=A , sampling_rate=A , padding_value=A , **A , ) UpperCAmelCase : Dict = spectrogram_length UpperCAmelCase : Optional[Any] = num_channels UpperCAmelCase : Any = patch_size UpperCAmelCase : Dict = feature_size // self.patch_size[1] UpperCAmelCase : Optional[int] = n_fft UpperCAmelCase : Optional[int] = sampling_rate // hop_length_to_sampling_rate UpperCAmelCase : Any = sampling_rate UpperCAmelCase : int = padding_value UpperCAmelCase : int = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=A , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowercase( self , A ) -> np.ndarray: UpperCAmelCase : List[Any] = spectrogram( A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , ) UpperCAmelCase : Dict = log_spec[:, :-1] UpperCAmelCase : List[str] = log_spec - 2_0.0 UpperCAmelCase : List[Any] = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , A , A = None , A = True , A = None , A = False , A = False , **A , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) UpperCAmelCase : Union[str, Any] = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) UpperCAmelCase : Any = is_batched_numpy or ( isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A , np.ndarray ): UpperCAmelCase : int = np.asarray(A , dtype=np.floataa ) elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase : List[str] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCAmelCase : List[str] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A ): UpperCAmelCase : Dict = [np.asarray(A , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCAmelCase : Tuple = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCAmelCase : Optional[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCAmelCase : int = np.array(A ).astype(np.floataa ) # convert into correct format for padding UpperCAmelCase : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCAmelCase : List[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCAmelCase : Any = padded_audio_features * self.padding_value for i in range(len(A ) ): UpperCAmelCase : List[Any] = audio_features[i] UpperCAmelCase : Union[str, Any] = feature # return as BatchFeature if return_attention_mask: UpperCAmelCase : List[Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: UpperCAmelCase : Any = {"""audio_values""": padded_audio_features} UpperCAmelCase : List[str] = BatchFeature(data=A , tensor_type=A ) return encoded_inputs
265
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
1
"""Incremental sieve of Eratosthenes and a Project Euler style search built on it."""

from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield the primes in increasing order, using an incremental (lazy) sieve."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the remainder 2 * n * p_n first exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder for it will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
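As a quick sanity check, the sieve can be consumed directly (illustrative snippet, not part of the solution file):

from itertools import islice

primes = sieve()
print(list(islice(primes, 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]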
265
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
1
"""Reverse every word longer than four characters in a sentence."""


def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
265
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
1
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): a : str = """pt""" elif is_tf_available(): a : str = """tf""" else: a : Union[str, Any] = """jax""" class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = PerceiverTokenizer lowercase = False def _lowercase( self ) -> int: super().setUp() UpperCAmelCase : int = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowercase( self ) -> Any: return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def _lowercase( self , **A ) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A , A=False , A=20 , A=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. UpperCAmelCase : Any = [] for i in range(len(A ) ): try: UpperCAmelCase : Dict = tokenizer.decode([i] , clean_up_tokenization_spaces=A ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase : Union[str, Any] = list(filter(lambda A : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , A ) ) UpperCAmelCase : Union[str, Any] = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=A ) , A ) ) if max_length is not None and len(A ) > max_length: UpperCAmelCase : List[Any] = toks[:max_length] if min_length is not None and len(A ) < min_length and len(A ) > 0: while len(A ) < min_length: UpperCAmelCase : List[Any] = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase : List[Any] = [t[0] for t in toks] # Ensure consistency UpperCAmelCase : Optional[Any] = tokenizer.decode(A , clean_up_tokenization_spaces=A ) if " " not in output_txt and len(A ) > 1: UpperCAmelCase : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=A ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=A ) ) if with_prefix_space: UpperCAmelCase : Optional[Any] = """ """ + output_txt UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) return output_txt, output_ids def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = self.perceiver_tokenizer UpperCAmelCase : List[str] = """Unicode €.""" UpperCAmelCase : List[Any] = tokenizer(A ) UpperCAmelCase : List[str] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["""input_ids"""] , A ) # decoding UpperCAmelCase : Tuple = tokenizer.decode(A ) self.assertEqual(A , """[CLS]Unicode €.[SEP]""" ) UpperCAmelCase : List[str] = tokenizer("""e è é ê ë""" ) UpperCAmelCase : List[Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["""input_ids"""] , A ) # decoding UpperCAmelCase : Tuple = tokenizer.decode(A ) self.assertEqual(A , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def _lowercase( self ) -> Dict: UpperCAmelCase : List[str] = 
self.perceiver_tokenizer UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off UpperCAmelCase : Dict = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase : List[Any] = tokenizer(A , padding=A , return_tensors=A ) self.assertIsInstance(A , A ) if FRAMEWORK != "jax": UpperCAmelCase : List[str] = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(A , A ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer UpperCAmelCase : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] UpperCAmelCase : List[str] = tokenizer(A , padding=A , return_tensors=A ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , A ) self.assertIn("""attention_mask""" , A ) self.assertNotIn("""decoder_input_ids""" , A ) self.assertNotIn("""decoder_attention_mask""" , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Tuple = self.perceiver_tokenizer UpperCAmelCase : int = [ """Summary of the text.""", """Another summary.""", ] UpperCAmelCase : List[str] = tokenizer( text_target=A , max_length=32 , padding="""max_length""" , truncation=A , return_tensors=A ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def _lowercase( self ) -> List[str]: # safety check on max_len default value so we are sure the test works UpperCAmelCase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase : List[Any] = tempfile.mkdtemp() UpperCAmelCase : Tuple = """ He is very happy, UNwant\u00E9d,running""" UpperCAmelCase : Optional[int] = tokenizer.encode(A , add_special_tokens=A ) tokenizer.save_pretrained(A ) UpperCAmelCase : Union[str, Any] = tokenizer.__class__.from_pretrained(A ) UpperCAmelCase : Tuple = after_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) shutil.rmtree(A ) UpperCAmelCase : List[str] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase : List[str] = tempfile.mkdtemp() UpperCAmelCase : Tuple = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) UpperCAmelCase : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) UpperCAmelCase : Optional[int] = tokenizer.encode(A , add_special_tokens=A ) tokenizer.save_pretrained(A ) UpperCAmelCase : Dict = tokenizer.__class__.from_pretrained(A ) UpperCAmelCase : Tuple = after_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) 
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(A ) with open(os.path.join(A , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: UpperCAmelCase : List[Any] = json.load(A ) with open(os.path.join(A , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: UpperCAmelCase : Dict = json.load(A ) UpperCAmelCase : Optional[Any] = [f'''<extra_id_{i}>''' for i in range(125 )] UpperCAmelCase : str = added_tokens_extra_ids + [ """an_additional_special_token""" ] UpperCAmelCase : Dict = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(A , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(A , A ) with open(os.path.join(A , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(A , A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase : List[Any] = tokenizer_class.from_pretrained( A , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase : str = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=A )] UpperCAmelCase : List[str] = tokenizer_class.from_pretrained( A , additional_special_tokens=A , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def _lowercase( self ) -> Dict: UpperCAmelCase : List[str] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , """�""" ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: pass def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> List[Any]: pass def _lowercase( self ) -> Tuple: # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens UpperCAmelCase : str = self.get_tokenizers(fast=A , do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase : Tuple = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""] UpperCAmelCase : Tuple = 
tokenizer.convert_tokens_to_string(A ) self.assertIsInstance(A , A )
265
"""Project Euler problem 115: fill-count functions for separated blocks."""

from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill-count function
    F(min_block_length, n) first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
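To see the recurrence at work on a small case: with a minimum block length of 3, a row of length 7 can be filled in exactly 17 ways (the worked example from Project Euler 114). A short illustrative helper using the same recurrence as the solution above (`fill_count` is not part of the file itself):

def fill_count(row_length: int, min_block_length: int = 3) -> int:
    """Count placements of blocks (length >= min_block_length) in a row,
    any two blocks separated by at least one empty cell."""
    f = [1] * min_block_length  # rows shorter than a block admit only the empty arrangement
    for n in range(min_block_length, row_length + 1):
        f.append(1)  # the empty arrangement
        for block_length in range(min_block_length, n + 1):
            # leftmost block at block_start, one separating cell, any filling of the rest
            for block_start in range(n - block_length):
                f[n] += f[n - block_start - block_length - 1]
            f[n] += 1  # leftmost block flush with the right edge
    return f[row_length]


assert fill_count(7, 3) == 17  # the worked example from Project Euler 114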
265
1
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
265
"""Segment tree with lazy propagation: range assignment and range-max queries."""

from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for _ in range(0, 4 * size)]
        self.flag = [False for _ in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b] in O(log n), pushing pending updates down."""
        if self.flag[idx]:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
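A quick way to gain confidence in the lazy propagation is to fuzz the tree against a plain list model, where a range update is a slice assignment and a range query is `max` over a slice (illustrative snippet):

import random

reference = [random.randint(-100, 100) for _ in range(15)]
tree = SegmentTree(15)
tree.build(1, 1, 15, reference)

for _ in range(100):
    lo = random.randint(1, 15)
    hi = random.randint(lo, 15)
    if random.random() < 0.5:
        val = random.randint(-100, 100)
        tree.update(1, 1, 15, lo, hi, val)        # range assignment on the tree
        reference[lo - 1 : hi] = [val] * (hi - lo + 1)  # same assignment on the model
    else:
        assert tree.query(1, 1, 15, lo, hi) == max(reference[lo - 1 : hi])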
265
1
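A minimal usage sketch for the lazy segment tree above (range assignment plus range-max queries), assuming the class is exposed under the name SegmentTree as its demo block implies; the sample values are illustrative.

values = [5, 1, 4, 2]
size = len(values)
tree = SegmentTree(size)
tree.build(1, 1, size, values)
print(tree.query(1, 1, size, 2, 4))   # max over positions 2..4 -> 4
tree.update(1, 1, size, 1, 2, 9)      # lazily assign 9 across positions 1..2
print(tree.query(1, 1, size, 1, 4))   # -> 9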
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer a : Dict = logging.get_logger(__name__) a : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} a : int = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } a : Optional[int] = { """roberta-base""": 5_1_2, """roberta-large""": 5_1_2, """roberta-large-mnli""": 5_1_2, """distilroberta-base""": 5_1_2, """roberta-base-openai-detector""": 5_1_2, """roberta-large-openai-detector""": 5_1_2, } class UpperCamelCase_ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = RobertaTokenizer def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> List[Any]: super().__init__( A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , ) UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space: UpperCAmelCase : Union[str, Any] = getattr(A , 
pre_tok_state.pop("""type""" ) ) UpperCAmelCase : Dict = add_prefix_space UpperCAmelCase : Optional[Any] = pre_tok_class(**A ) UpperCAmelCase : List[Any] = add_prefix_space UpperCAmelCase : List[str] = """post_processor""" UpperCAmelCase : List[Any] = getattr(self.backend_tokenizer , A , A ) if tokenizer_component_instance: UpperCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase : Any = tuple(state["""sep"""] ) if "cls" in state: UpperCAmelCase : Any = tuple(state["""cls"""] ) UpperCAmelCase : List[str] = False if state.get("""add_prefix_space""" , A ) != add_prefix_space: UpperCAmelCase : Any = add_prefix_space UpperCAmelCase : int = True if state.get("""trim_offsets""" , A ) != trim_offsets: UpperCAmelCase : Optional[int] = trim_offsets UpperCAmelCase : Tuple = True if changes_to_apply: UpperCAmelCase : Dict = getattr(A , state.pop("""type""" ) ) UpperCAmelCase : Union[str, Any] = component_class(**A ) setattr(self.backend_tokenizer , A , A ) @property def _lowercase( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value UpperCAmelCase : Optional[Any] = value def _lowercase( self , *A , **A ) -> BatchEncoding: UpperCAmelCase : int = kwargs.get("""is_split_into_words""" , A ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A , **A ) def _lowercase( self , *A , **A ) -> BatchEncoding: UpperCAmelCase : Any = kwargs.get("""is_split_into_words""" , A ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*A , **A ) def _lowercase( self , A , A = None ) -> Tuple[str]: UpperCAmelCase : int = self._tokenizer.model.save(A , name=A ) return tuple(A ) def _lowercase( self , A , A=None ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowercase( self , A , A = None ) -> List[int]: UpperCAmelCase : int = [self.sep_token_id] UpperCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
265
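The two methods at the end of the tokenizer above implement RoBERTa's special-token layout: `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair. A sketch against the real public transformers API (it downloads the roberta-base checkpoint, so it needs network access):

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tok.build_inputs_with_special_tokens([100, 200], [300])
# pair layout: <s> A </s></s> B </s> -> one bos, three eos markers
assert ids[0] == tok.bos_token_id
assert ids.count(tok.eos_token_id) == 3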
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
265
1
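A self-contained usage sketch for the point transform above, using an in-memory gradient instead of the demo's on-disk file (the lena.jpg path is illustrative and may not exist):

from PIL import Image

gradient = Image.linear_gradient("L")  # 256x256 grayscale ramp, 0 at the top
brighter = change_brightness(gradient.convert("RGB"), 64)
darker = change_brightness(gradient.convert("RGB"), -64)
print(brighter.getpixel((0, 0)))      # roughly (64, 64, 64): 128 + 64 + (0 - 128)
print(darker.getpixel((255, 255)))    # roughly (191, 191, 191): 128 - 64 + (255 - 128)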
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> str: UpperCAmelCase : str = 0 def _lowercase( self ) -> Optional[int]: UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(A , A ) def _lowercase( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Tuple = Path(A ) / """preprocessor_config.json""" UpperCAmelCase : str = Path(A ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(A , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(A , """w""" ) ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(A ) self.assertIsInstance(A , A ) def _lowercase( self ) -> int: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : List[Any] = Path(A ) / """preprocessor_config.json""" UpperCAmelCase : int = Path(A ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(A , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(A , """w""" ) ) UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(A ) self.assertIsInstance(A , A ) def _lowercase( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : str = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase : Union[str, Any] = Path(A ) / """preprocessor_config.json""" UpperCAmelCase : List[Any] = Path(A ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(A , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(A , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(A ).to_dict() config_dict.pop("""image_processor_type""" ) UpperCAmelCase : List[Any] = CLIPImageProcessor(**A ) # save in new folder model_config.save_pretrained(A ) config.save_pretrained(A ) UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(A ) # make sure private variable is not incorrectly saved UpperCAmelCase : int = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(A , A ) def _lowercase( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : str = Path(A ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(A , """w""" ) , ) UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(A ) self.assertIsInstance(A , A ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaisesRegex( A , """clip-base is not a local folder 
and is not a valid model identifier""" ): UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""clip-base""" ) def _lowercase( self ) -> Dict: with self.assertRaisesRegex( A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(A , revision="""aaaaaa""" ) def _lowercase( self ) -> str: with self.assertRaisesRegex( A , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _lowercase( self ) -> Optional[int]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(A ): UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(A ): UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=A ) UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=A ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(A ) UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(A , trust_remote_code=A ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _lowercase( self ) -> List[Any]: try: AutoConfig.register("""custom""" , A ) AutoImageProcessor.register(A , A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(A ): AutoImageProcessor.register(A , A ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : List[Any] = Path(A ) / """preprocessor_config.json""" UpperCAmelCase : int = Path(A ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(A , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(A , """w""" ) ) UpperCAmelCase : List[Any] = CustomImageProcessor.from_pretrained(A ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(A ) UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(A ) self.assertIsInstance(A , A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase( self ) -> Dict: class UpperCamelCase_ ( __magic_name__ ): lowercase = True try: AutoConfig.register("""custom""" , A ) AutoImageProcessor.register(A , A ) # If remote code is not set, the default is to use local UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=A ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=A ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(A , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
265
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
1
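The parameterized scaling test above builds the dict {"type": scaling_type, "factor": 10.0} and assigns it to the model config before constructing the scaled model; the knob it drives is the config's rope_scaling field, available in recent transformers releases. A minimal sketch (sizes are illustrative):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(
    hidden_size=64,
    num_attention_heads=8,
    rope_scaling={"type": "linear", "factor": 10.0},  # or "dynamic"
)
print(config.rope_scaling)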
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_(unittest.TestCase):
    @slow
    def _lowercase(self) -> int:
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
265
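The assertion above scores the target sequence as -(num_label_tokens * mean_loss): the model's loss is the mean token cross-entropy over the labels, so multiplying by the label length recovers (minus) the total log-likelihood. A toy check with made-up numbers, just to show the arithmetic:

num_label_tokens = 6    # hypothetical label length
mean_loss = 14.15       # hypothetical mean NLL per token
log_likelihood = -(num_label_tokens * mean_loss)
print(log_likelihood)   # -84.9, the same scale as EXPECTED_SCORE above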
'''simple docstring'''


def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
265
1
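Quick sanity checks for the divide-and-conquer power above against Python's built-in operator (a sketch; exact equality holds for these small cases):

assert power(2, 10) == 2**10
assert power(5, 0) == 1
assert power(-2, -3) == (-2) ** -3    # -0.125
assert actual_power(3, 5) == 243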
'''simple docstring'''


def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
265
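A non-interactive round-trip sketch for the S-DES routines above, using a fixed 10-bit key and 8-bit message instead of input(); it assumes the table names from the demo block are in scope, and the sample bit strings are illustrative.

def sdes_roundtrip(key: str, message: str) -> bool:
    # key schedule: K1 after one shift, K2 after two more shifts
    temp = apply_table(key, p10_table)
    left, right = left_shift(temp[:5]), left_shift(temp[5:])
    key1 = apply_table(left + right, p8_table)
    left, right = left_shift(left_shift(left)), left_shift(left_shift(right))
    key2 = apply_table(left + right, p8_table)

    # encrypt: IP, round(K1), swap halves, round(K2), inverse IP
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = function(expansion, s0, s1, key2, temp[4:] + temp[:4])
    ct = apply_table(temp, IP_inv)

    # decrypt: same structure with the round keys in reverse order
    temp = apply_table(ct, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = function(expansion, s0, s1, key1, temp[4:] + temp[:4])
    return apply_table(temp, IP_inv) == message


print(sdes_roundtrip("1010000010", "11010111"))  # expected: True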
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a : str = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCamelCase_ ( __magic_name__ ): lowercase = ['pixel_values'] def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = None , A = True , A = 1 / 255 , A = True , A = None , A = None , A = True , **A , ) -> None: super().__init__(**A ) UpperCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 224} UpperCAmelCase : int = get_size_dict(A , default_to_square=A ) UpperCAmelCase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase : Dict = get_size_dict(A , default_to_square=A , param_name="""crop_size""" ) UpperCAmelCase : List[Any] = do_resize UpperCAmelCase : Optional[Any] = size UpperCAmelCase : Tuple = resample UpperCAmelCase : List[Any] = do_center_crop UpperCAmelCase : List[Any] = crop_size UpperCAmelCase : Any = do_rescale UpperCAmelCase : List[str] = rescale_factor UpperCAmelCase : List[Any] = do_normalize UpperCAmelCase : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase : Optional[Any] = do_convert_rgb def _lowercase( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray: UpperCAmelCase : Dict = get_size_dict(A , default_to_square=A ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCAmelCase : str = get_resize_output_image_size(A , size=size["""shortest_edge"""] , default_to_square=A ) return resize(A , size=A , resample=A , data_format=A , **A ) def _lowercase( self , A , A , A = None , **A , ) -> np.ndarray: UpperCAmelCase : Tuple = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A ) def _lowercase( self , A , A , A = None , **A , ) -> Dict: return rescale(A , scale=A , data_format=A , **A ) def _lowercase( self , A , A , A , A = None , **A , ) -> np.ndarray: return normalize(A , mean=A , std=A , data_format=A , **A ) def _lowercase( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image: UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : Optional[Any] = size if size is not None else self.size UpperCAmelCase : Dict = get_size_dict(A , param_name="""size""" , default_to_square=A ) UpperCAmelCase : Any = resample if resample is not None else self.resample UpperCAmelCase : Any = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase : int = crop_size if crop_size is not None else self.crop_size UpperCAmelCase : int = get_size_dict(A , param_name="""crop_size""" , default_to_square=A ) UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : str = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std UpperCAmelCase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase : Optional[int] = make_list_of_images(A ) if not valid_images(A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase : str = [convert_to_rgb(A ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase : Optional[int] = [to_numpy_array(A ) for image in images] if do_resize: UpperCAmelCase : str = [self.resize(image=A , size=A , resample=A ) for image in images] if do_center_crop: UpperCAmelCase : List[str] = [self.center_crop(image=A , size=A ) for image in images] if do_rescale: UpperCAmelCase : Any = [self.rescale(image=A , scale=A ) for image in images] if do_normalize: UpperCAmelCase : Any = [self.normalize(image=A , mean=A , std=A ) for image in images] UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(A , A ) for image in images] UpperCAmelCase : Optional[Any] = {"""pixel_values""": images} return BatchFeature(data=A , tensor_type=A )
265
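The preprocess method above applies the steps in a fixed order: convert to RGB, resize, center crop, rescale, normalize. A tiny numpy sketch of the last two steps on one fake pixel, using the OPENAI_CLIP_MEAN / OPENAI_CLIP_STD defaults the processor imports (pixel values are illustrative):

import numpy as np

pixel = np.array([128.0, 64.0, 255.0])
rescaled = pixel * (1 / 255)                            # rescale step
mean = np.array([0.48145466, 0.4578275, 0.40821073])    # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])    # OPENAI_CLIP_STD
normalized = (rescaled - mean) / std                    # normalize step
print(normalized)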
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
265
1
'''simple docstring'''


def is_palindrome(num) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
265
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
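Editor's note: the heart of the conversion script above is rename_fairseq_keys — pure string rewriting that maps fairseq MoE checkpoint keys onto the Hugging Face NLLB-MoE layout, applied per expert file before the sharding index is assembled. The toy below replays a few of those substitutions on a fake state dict; the key names and the helper name are illustrative placeholders.

fake_state = {
    "layers.0.moe_layer.experts.0.fc1.weight": 0.0,
    "layers.0.moe_layer.gate.wg.weight": 0.0,
    "layers.0.encoder_attn_layer_norm.weight": 0.0,
}

def rename(key, expert_idx=None):
    # Mirrors a subset of the substitutions in rename_fairseq_keys above.
    if "moe_layer.experts." in key and expert_idx is not None:
        key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
    if "gate" in key:
        key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
    if "encoder_attn_layer_norm" in key:
        key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
    return key

for old_key in fake_state:
    print(old_key, "->", rename(old_key, expert_idx=3))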
265
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
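Editor's note: the matrix of cases above pins down how add_prefix_space and trim_offsets shape the returned offset_mapping. For the two-token string "hello hello", the second token's span either excludes the separating space (trim_offsets=True) or absorbs it (trim_offsets=False); the arithmetic below reproduces exactly the spans the assertions expect, without needing a tokenizer.

# Hand-computed offsets for "hello hello" (token length 5): trim_offsets
# decides whether the second token's span starts after the space (6) or on it (5).
token_len = len("hello")

trimmed = [(0, token_len), (token_len + 1, token_len + 1 + token_len)]
untrimmed = [(0, token_len), (token_len, token_len + 1 + token_len)]

assert trimmed == [(0, 5), (6, 11)]
assert untrimmed == [(0, 5), (5, 11)]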
265
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
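Editor's note: the SwiftFormer converter follows the standard checkpoint-porting recipe — size a config from the variant name, rewrite state-dict keys, load into the HF class, then assert the logits match hard-coded expected values before saving. The key rewrite reduces to the pop-and-reassign idiom shown below on a toy dict; the key names here are illustrative, not copied from a real checkpoint.

# The rename_key idiom from the script above, in isolation: build (old, new)
# pairs, then pop each old entry into its new slot.
state = {"patch_embed.proj.weight": [0.0], "network.0.fc.weight": [0.0]}
rename_pairs = [
    ("patch_embed.proj.weight", "swiftformer.patch_embed.patch_embedding.proj.weight"),
    ("network.0.fc.weight", "swiftformer.encoder.network.0.fc.weight"),
]
for src, dest in rename_pairs:
    state[dest] = state.pop(src)  # same effect as rename_key(dct, old, new)
print(sorted(state))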
265
1
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCamelCase_ : def __init__( self , A , A=13 , A=10 , A=3 , A=2 , A=2 , A=2 , A=True , A=True , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.0_2 , A=0.9 , A=None , ) -> Optional[Any]: UpperCAmelCase : str = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : List[Any] = image_size UpperCAmelCase : Optional[int] = num_channels UpperCAmelCase : List[Any] = patch_size UpperCAmelCase : Optional[Any] = tubelet_size UpperCAmelCase : Dict = num_frames UpperCAmelCase : Optional[int] = is_training UpperCAmelCase : Dict = use_labels UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Dict = type_sequence_label_size UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Optional[Any] = mask_ratio UpperCAmelCase : Union[str, Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame UpperCAmelCase : str = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos UpperCAmelCase : Optional[Any] = int(mask_ratio * self.seq_length ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : Tuple = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Dict = None if self.use_labels: UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : List[Any] = self.get_config() return config, pixel_values, labels def _lowercase( self ) -> Optional[Any]: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , ) def _lowercase( self , A , A , A ) -> Optional[Any]: UpperCAmelCase : str = VideoMAEModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : Union[str, Any] = model(A ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : List[str] = VideoMAEForPreTraining(A ) model.to(A ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch UpperCAmelCase : int = torch.ones((self.num_masks,) ) UpperCAmelCase : Tuple = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) UpperCAmelCase : List[Any] = mask.expand(self.batch_size , -1 ).bool() UpperCAmelCase : Optional[Any] = model(A , A ) # model only returns predictions for masked patches UpperCAmelCase : str = mask.sum().item() UpperCAmelCase : Tuple = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowercase = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : List[Any] = VideoMAEModelTester(self ) UpperCAmelCase : str = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 ) def _lowercase( self , A , A , A=False ) -> int: UpperCAmelCase : List[str] = copy.deepcopy(A ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch UpperCAmelCase : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) UpperCAmelCase : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) UpperCAmelCase : List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() UpperCAmelCase : Tuple = bool_masked_pos.to(A ) if return_labels: if model_class in [ *get_values(A ), ]: UpperCAmelCase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) return inputs_dict def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" ) def _lowercase( self ) -> int: pass def _lowercase( self ) -> Dict: UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Union[str, Any] = model_class(A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A , nn.Linear ) ) def _lowercase( self ) -> Dict: UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(A ) UpperCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : 
Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A ) @slow def _lowercase( self ) -> int: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Dict = VideoMAEModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self ) -> Optional[int]: if not self.has_attentions: pass else: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = True for model_class in self.all_model_classes: UpperCAmelCase : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks UpperCAmelCase : Tuple = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) UpperCAmelCase : List[Any] = True UpperCAmelCase : List[str] = False UpperCAmelCase : str = True UpperCAmelCase : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(A , A ) ) UpperCAmelCase : Optional[int] = outputs.attentions self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Tuple = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): UpperCAmelCase : Any = model(**self._prepare_for_class(A , A ) ) UpperCAmelCase : int = outputs.attentions self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) UpperCAmelCase : Optional[int] = len(A ) # Check attention is always last and order is fine UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : List[str] = True UpperCAmelCase : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(A , A ) ) self.assertEqual(out_len + 1 , len(A ) ) UpperCAmelCase : Any = outputs.attentions self.assertEqual(len(A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowercase( self ) -> Any: def check_hidden_states_output(A , A , A ): UpperCAmelCase : str = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): UpperCAmelCase : List[str] = model(**self._prepare_for_class(A , A ) ) UpperCAmelCase : int = outputs.hidden_states UpperCAmelCase : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(A ) , A ) UpperCAmelCase : Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks UpperCAmelCase : List[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = True check_hidden_states_output(A , A , A ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Optional[Any] = True check_hidden_states_output(A , A , A ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowercase( self ) -> str: pass def __lowerCamelCase ( ) -> Optional[int]: UpperCAmelCase : int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) UpperCAmelCase : List[Any] = np.load(_lowercase ) return list(_lowercase ) @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def _lowercase( self ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : List[str] = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to( A ) UpperCAmelCase : Optional[int] = self.default_image_processor UpperCAmelCase : Optional[int] = prepare_video() UpperCAmelCase : Dict = image_processor(A , return_tensors="""pt""" ).to(A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[int] = model(**A ) # verify the logits UpperCAmelCase : Optional[Any] = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , A ) UpperCAmelCase : int = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) ) @slow def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(A ) UpperCAmelCase : Dict = self.default_image_processor UpperCAmelCase : Dict = prepare_video() UpperCAmelCase : Optional[Any] = image_processor(A , return_tensors="""pt""" ).to(A ) # add boolean mask, indicating which patches to mask UpperCAmelCase : str = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) UpperCAmelCase : str = torch.load(A ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**A ) # verify the logits UpperCAmelCase : Union[str, Any] = torch.Size([1, 1408, 1536] ) UpperCAmelCase : int = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=A ) self.assertEqual(outputs.logits.shape , A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) UpperCAmelCase : Union[str, Any] = torch.tensor([0.5_1_4_2] , device=A ) self.assertTrue(torch.allclose(outputs.loss , A , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) UpperCAmelCase : List[Any] = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=A ).to( A ) with torch.no_grad(): UpperCAmelCase : str = model(**A ) UpperCAmelCase : str = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=A ) self.assertTrue(torch.allclose(outputs.loss , A , atol=1e-4 ) )
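Editor's note: a detail worth calling out in the VideoMAE tests above is how bool_masked_pos is built for VideoMAEForPreTraining: one fixed mask pattern (ones for masked patches, zeros elsewhere) expanded across the batch, so every example masks the same number of patches. The sketch below reproduces that construction; the sizes correspond to the 16-frame, 224x224 base model rather than the small dummy config the unit test uses.

import torch

batch_size, seq_length, num_masks = 2, 1568, 1176  # 16 frames / tubelet 2 * 14*14 patches, 75% masked
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()

assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks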
265
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> Any: raise RuntimeError("""CUDA out of memory.""" ) class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Any: super().__init__() UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) UpperCAmelCase : Tuple = nn.BatchNormad(4 ) UpperCAmelCase : int = nn.Linear(4 , 5 ) def _lowercase( self , A ) -> Any: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A , [128, 64, 32, 16, 8] ) def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" ) self.assertListEqual(A , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _lowercase( self ) -> Any: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A , A ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _lowercase( self ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated() UpperCAmelCase : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , A ) UpperCAmelCase : Tuple = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() , A )
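Editor's note: the decorator exercised above retries the wrapped function, halving batch_size after every out-of-memory failure and raising once it reaches zero — exactly the [128, 64, 32, 16, 8] sequences the assertions record. Below is a deliberately simplified re-implementation of that retry loop; accelerate's real find_executable_batch_size additionally clears CUDA caches and recognizes several OOM message variants.

import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)  # batch_size is injected first
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise
                    batch_size //= 2  # halve and retry on OOM
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@find_executable_batch_size_sketch(starting_batch_size=128)
def train(batch_size):
    if batch_size != 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 8, after trying 128, 64, 32, 16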
265
1
'''simple docstring''' from __future__ import annotations import math class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : Optional[int] = size # approximate the overall size of segment tree with given value UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )] UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def _lowercase( self , A ) -> int: return idx * 2 def _lowercase( self , A ) -> int: return idx * 2 + 1 def _lowercase( self , A , A , A , A ) -> None: if left_element == right_element: UpperCAmelCase : str = a[left_element - 1] else: UpperCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(A ) , A , A , A ) self.build(self.right(A ) , mid + 1 , A , A ) UpperCAmelCase : str = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) def _lowercase( self , A , A , A , A , A , A ) -> bool: if self.flag[idx] is True: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : int = False if left_element != right_element: UpperCAmelCase : List[str] = self.lazy[idx] UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase : Optional[Any] = val if left_element != right_element: UpperCAmelCase : Tuple = val UpperCAmelCase : int = val UpperCAmelCase : Any = True UpperCAmelCase : str = True return True UpperCAmelCase : str = (left_element + right_element) // 2 self.update(self.left(A ) , A , A , A , A , A ) self.update(self.right(A ) , mid + 1 , A , A , A , A ) UpperCAmelCase : List[str] = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) return True def _lowercase( self , A , A , A , A , A ) -> int | float: if self.flag[idx] is True: UpperCAmelCase : Any = self.lazy[idx] UpperCAmelCase : Any = False if left_element != right_element: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : Tuple = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase : Dict = (left_element + right_element) // 2 UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A ) UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A ) return max(A , A ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a : Optional[Any] = 1_5 a : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
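Editor's note: the structure above is a max segment tree with lazy propagation supporting range assignment; note that build/update/query take 1-indexed, inclusive bounds. A cheap way to gain confidence in lazy-propagation code is to cross-check it against a plain list, as in the sketch below, which assumes the SegmentTree class above is in scope.

import random

data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
n = len(data)
segt = SegmentTree(n)
segt.build(1, 1, n, data)

reference = data[:]
for _ in range(100):
    left = random.randint(1, n)
    right = random.randint(left, n)
    if random.random() < 0.5:
        val = random.randint(-50, 50)
        segt.update(1, 1, n, left, right, val)  # assign val on [left, right]
        for i in range(left - 1, right):
            reference[i] = val
    else:
        assert segt.query(1, 1, n, left, right) == max(reference[left - 1 : right])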
265
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[int] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
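Editor's note: the __init__ module above never imports torch eagerly — outside TYPE_CHECKING, _LazyModule replaces the module so the heavy submodules load only when an attribute is first touched. A miniature of the same idea using PEP 562 module-level __getattr__ is sketched below; it belongs in a package __init__.py and is an analogue, not transformers' actual implementation.

import importlib

_import_structure = {"modeling_git": ["GitModel", "GitForCausalLM"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Resolve the attribute to its submodule, importing it only on first access.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)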
265
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , A , A=7 , A=3 , A=18 , A=30 , A=400 , A=True , A=None , A=True , A=None , ) -> Union[str, Any]: UpperCAmelCase : List[str] = size if size is not None else {"""shortest_edge""": 20} UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase : Optional[int] = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : Dict = image_size UpperCAmelCase : str = min_resolution UpperCAmelCase : List[Any] = max_resolution UpperCAmelCase : Optional[Any] = do_resize UpperCAmelCase : List[str] = size UpperCAmelCase : int = do_center_crop UpperCAmelCase : List[str] = crop_size def _lowercase( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MobileNetVaImageProcessor if is_vision_available() else None def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = MobileNetVaImageProcessingTester(self ) @property def _lowercase( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , """do_resize""" ) ) self.assertTrue(hasattr(A , """size""" ) ) self.assertTrue(hasattr(A , """do_center_crop""" ) ) self.assertTrue(hasattr(A , """crop_size""" ) ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def _lowercase( self ) -> Optional[Any]: pass def _lowercase( self ) -> List[str]: # Initialize image_processing UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A , Image.Image ) # Test not batched input UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase : int = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _lowercase( self ) -> List[str]: # Initialize image_processing UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for image in image_inputs: self.assertIsInstance(A , np.ndarray ) # Test not batched input UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase : Any = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def _lowercase( self ) -> int: # Initialize image_processing UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for image in image_inputs: self.assertIsInstance(A , torch.Tensor ) # Test not batched input UpperCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCAmelCase : List[Any] = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
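Editor's note: every shape assertion in the test above reduces to one behavior — resize to the configured shortest edge, center-crop to crop_size, and stack into (batch, channels, 18, 18). The snippet below shows that end to end; it assumes transformers and torch are installed and that the sample's obfuscated MobileNetVaImageProcessor stands for the real MobileNetV1ImageProcessor.

import numpy as np
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
# Seven random channel-first uint8 images of varying input size.
images = [np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8) for _ in range(7)]
pixel_values = processor(images, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([7, 3, 18, 18])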
265
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
265
1
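The sample above pins down how `add_prefix_space` and `trim_offsets` shift a fast RoBERTa-style tokenizer's offset mappings. A minimal standalone sketch of the same behavior, assuming network access to the public `roberta-base` checkpoint and that `hello` maps to a single vocabulary item (as the test itself assumes for its `text_of_1_token`):

```python
from transformers import RobertaTokenizerFast

text = "hello hello"  # two occurrences of a single-token word

# trim_offsets=True: the second token's offset excludes its leading space.
tok_trim = RobertaTokenizerFast.from_pretrained(
    "roberta-base", add_prefix_space=True, trim_offsets=True
)
enc = tok_trim(text, return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (6, 11)] -- the space at index 5 is trimmed

# trim_offsets=False: the second token's span starts at the space itself.
tok_keep = RobertaTokenizerFast.from_pretrained(
    "roberta-base", add_prefix_space=True, trim_offsets=False
)
enc = tok_keep(text, return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (5, 11)]
```

The expected tuples mirror the assertions in the test: `(len, len + 1 + len)` when offsets keep the space, `(len + 1, len + 1 + len)` when they trim it.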
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
265
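The conversion script above shards the NLLB-MoE checkpoint expert by expert and then writes a `WEIGHTS_INDEX_NAME`-style JSON index mapping each parameter to its shard file. A small sketch of just that index-building step, with hypothetical key lists standing in for the real `sharded_state_dicts`:

```python
import json

# Hypothetical per-shard key lists; in the script these come from the expert
# checkpoints plus the final shared-weights shard.
sharded_state_dicts = [
    ["ffn.experts.expert_0.fc1.weight", "ffn.experts.expert_0.fc2.weight"],
    ["shared.decoder.embed_tokens.weight"],
]
total_size = 123_456  # sum over tensors of numel() * dtype_byte_size(dtype)

weight_map = {}
for idx, shard_keys in enumerate(sharded_state_dicts):
    shard_file = "pytorch_model-{:05d}-of-{:05d}.bin".format(
        idx + 1, len(sharded_state_dicts)
    )
    for key in shard_keys:
        weight_map[key] = shard_file

index = {"metadata": {"total_size": total_size}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))
```

This is the same two-pass trick the script uses: shards are first saved with `-of-???` placeholder names because the final shard count is not yet known, then renamed once it is.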
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
265
1
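The pipeline tests above verify output shapes, score normalization, and the `multi_label` path for zero-shot text classification. A minimal usage sketch of the same public API, assuming the `roberta-large-mnli` checkpoint the slow tests load:

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")

# Single-label mode: scores are softmaxed across candidates and sum to 1,
# which is what the assertAlmostEqual(sum(...), 1.0) checks assert.
out = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
print(out["labels"][0], out["scores"][0])

# multi_label=True: each candidate is scored independently as an
# entailment-vs-contradiction decision, so scores need not sum to 1.
out = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    multi_label=True,
)
print(list(zip(out["labels"], out["scores"])))
```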
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
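The RoPE-scaling test above instantiates a second GPT-NeoX model with a `{"type": ..., "factor": ...}` scaling entry and checks that `"dynamic"` scaling leaves short inputs untouched while `"linear"` does not. A sketch of that setup outside the test harness, assuming the installed `transformers` version accepts a `rope_scaling` argument at config construction time:

```python
import torch
from transformers import GPTNeoXConfig, GPTNeoXModel

kwargs = dict(
    vocab_size=99, hidden_size=64, num_hidden_layers=2,
    num_attention_heads=8, intermediate_size=128,
    max_position_embeddings=512,
)

torch.manual_seed(42)  # fixed seed so both models get the same random weights
original = GPTNeoXModel(GPTNeoXConfig(**kwargs)).eval()

torch.manual_seed(42)
scaled = GPTNeoXModel(
    GPTNeoXConfig(**kwargs, rope_scaling={"type": "dynamic", "factor": 10.0})
).eval()

short = torch.randint(0, 99, (1, 10))
with torch.no_grad():
    a = original(short).last_hidden_state
    b = scaled(short).last_hidden_state

# "dynamic" (NTK-aware) scaling only alters RoPE once an input exceeds
# max_position_embeddings, so short inputs should match; "linear" would not.
print(torch.allclose(a, b, atol=1e-5))
```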
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
265
1
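The fixtures above all follow one pattern: create a uniquely named repo against the CI hub, yield it to the test, and delete it in a cleanup step that tolerates an already-deleted repo. A condensed sketch of that pattern as a plain context manager, with `user` and `token` as placeholders for the CI credentials:

```python
import time
from contextlib import contextmanager

from huggingface_hub import HfApi


@contextmanager
def temporary_dataset_repo(hf_api: HfApi, user: str, token: str):
    """Create a throwaway private dataset repo and guarantee cleanup."""
    repo_id = f"{user}/tmp_repo-{int(time.time() * 10e3)}"  # unique-ish name
    hf_api.create_repo(repo_id, token=token, repo_type="dataset", private=True)
    try:
        yield repo_id
    finally:
        try:
            hf_api.delete_repo(repo_id, token=token, repo_type="dataset")
        except Exception:
            pass  # repo may already be gone; mirror the fixtures' leniency
```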
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[Any] = logging.get_logger(__name__) # TODO Update this a : Tuple = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class UpperCamelCase_ ( __magic_name__ ): lowercase = 'esm' def __init__( self , A=None , A=None , A=None , A=768 , A=12 , A=12 , A=3072 , A=0.1 , A=0.1 , A=1026 , A=0.0_2 , A=1e-12 , A="absolute" , A=True , A=None , A=False , A=False , A=None , A=None , **A , ) -> str: super().__init__(pad_token_id=A , mask_token_id=A , **A ) UpperCAmelCase : Any = vocab_size UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : Any = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Optional[int] = initializer_range UpperCAmelCase : Optional[int] = layer_norm_eps UpperCAmelCase : Dict = position_embedding_type UpperCAmelCase : Any = use_cache UpperCAmelCase : Dict = emb_layer_norm_before UpperCAmelCase : Any = token_dropout UpperCAmelCase : Dict = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("""No esmfold_config supplied for folding model, using default values.""" ) UpperCAmelCase : Tuple = EsmFoldConfig() elif isinstance(A , A ): UpperCAmelCase : Dict = EsmFoldConfig(**A ) UpperCAmelCase : Tuple = esmfold_config if vocab_list is None: logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" ) UpperCAmelCase : Optional[int] = get_default_vocab_list() else: UpperCAmelCase : Optional[Any] = vocab_list else: UpperCAmelCase : Any = None UpperCAmelCase : List[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , A ): raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" ) def _lowercase( self ) -> str: UpperCAmelCase : Any = super().to_dict() if isinstance(self.esmfold_config , A ): UpperCAmelCase : Tuple = self.esmfold_config.to_dict() return output @dataclass class UpperCamelCase_ : lowercase = None lowercase = True lowercase = False lowercase = False lowercase = False lowercase = 0 lowercase = True lowercase = False lowercase = 128 lowercase = None def _lowercase( self ) -> Dict: if self.trunk is None: UpperCAmelCase : Optional[Any] = TrunkConfig() elif isinstance(self.trunk , A ): UpperCAmelCase : Dict = TrunkConfig(**self.trunk ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = asdict(self ) UpperCAmelCase : int = self.trunk.to_dict() return output @dataclass class UpperCamelCase_ : lowercase = 48 lowercase = 1_024 lowercase = 128 lowercase = 32 lowercase = 32 lowercase = 32 lowercase = 0 lowercase = 0 lowercase = False lowercase = 4 lowercase = 128 lowercase = None def _lowercase( self ) -> Union[str, Any]: if self.structure_module is None: UpperCAmelCase : int = StructureModuleConfig() elif isinstance(self.structure_module , A ): UpperCAmelCase : Dict = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( 
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got""" f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got""" f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) UpperCAmelCase : str = self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : int = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got""" f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got""" f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def _lowercase( self ) -> Any: UpperCAmelCase : Any = asdict(self ) UpperCAmelCase : Optional[int] = self.structure_module.to_dict() return output @dataclass class UpperCamelCase_ : lowercase = 384 lowercase = 128 lowercase = 16 lowercase = 128 lowercase = 12 lowercase = 4 lowercase = 8 lowercase = 0.1 lowercase = 8 lowercase = 1 lowercase = 2 lowercase = 7 lowercase = 10 lowercase = 1e-8 lowercase = 1e5 def _lowercase( self ) -> Optional[Any]: return asdict(self ) def __lowerCamelCase ( ) -> int: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
265
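One detail worth flagging in the `TrunkConfig` validation above: the first two modulo checks compare `sequence_state_dim` and `pairwise_state_dim` against themselves, so they can never fire; the effective invariant is the later `num_heads * head_width == state_dim` check. A tiny sketch of that invariant in isolation, with the division done against the head width as presumably intended:

```python
def check_attention_dims(state_dim: int, head_width: int) -> int:
    """Verify that the per-head width tiles the state dimension exactly."""
    num_heads = state_dim // head_width
    if state_dim != num_heads * head_width:
        raise ValueError(
            f"state_dim should equal num_heads * head_width, "
            f"got {state_dim} != {num_heads} * {head_width}."
        )
    return num_heads


print(check_attention_dims(1024, 32))  # 32 heads, the sequence-track defaults
print(check_attention_dims(128, 32))   # 4 heads, the pairwise-track defaults
```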
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
265
1
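The pipeline above formats each candidate label through `hypothesis_template`, scores image-text pairs, and sorts results by descending score in its postprocess step. A minimal usage sketch, assuming a public CLIP checkpoint such as `openai/clip-vit-base-patch32`:

```python
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)

out = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # two cats
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)

# Results arrive sorted by score, mirroring the sorted(zip(...)) above.
for item in out:
    print(item["label"], round(item["score"], 3))
```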
'''simple docstring''' import pprint import requests API_ENDPOINT_URL : str = """https://zenquotes.io/api""" def __lowerCamelCase ( ) -> list: return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __lowerCamelCase ( ) -> list: return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": a : Optional[int] = __lowerCamelCase() pprint.pprint(a)
265
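The script above wraps two endpoints of the same API. A slightly more defensive sketch of the same calls, adding a timeout and a status check; the `/today` and `/random` paths are taken directly from the sample:

```python
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def fetch_quotes(kind: str = "random") -> list:
    """Fetch quotes; `kind` is "today" or "random" per the endpoints above."""
    response = requests.get(f"{API_ENDPOINT_URL}/{kind}", timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of parsing bad JSON
    return response.json()


if __name__ == "__main__":
    pprint.pprint(fetch_quotes("today"))
```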
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return getitem, k def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: return setitem, k, v def __lowerCamelCase ( _lowercase ) -> int: return delitem, k def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]: try: return fun(_lowercase , *_lowercase ), None except Exception as e: return None, e a : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a : int = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a : List[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a : Tuple = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : List[str] = HashMap(initial_block_size=4 ) UpperCAmelCase : Dict = {} for _, (fun, *args) in enumerate(_lowercase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase ) UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase ) assert my_res == py_res assert str(_lowercase ) == str(_lowercase ) assert set(_lowercase ) == set(_lowercase ) assert len(_lowercase ) == len(_lowercase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowercase ) -> bool: return not name.startswith("""_""" ) UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )} UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )} assert dict_public_names > hash_public_names
265
1
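The test above is differential testing: the same scripted sequence of `getitem`/`setitem`/`delitem` operations runs against both a plain `dict` and the custom `HashMap`, and every result, including raised exceptions, must agree. A condensed sketch of that pattern; here both sides are plain dicts, with a comment marking where the implementation under test would go, and exceptions compared by type rather than by instance:

```python
from operator import delitem, getitem, setitem


def run_op(obj, fun, *args):
    """Apply one operation, capturing either the result or the exception type."""
    try:
        return fun(obj, *args), None
    except Exception as exc:
        return None, type(exc)


operations = [
    (setitem, "key_a", "val_a"),
    (getitem, "key_a"),
    (delitem, "key_a"),
    (getitem, "key_a"),  # must raise KeyError on both sides
]

reference, candidate = {}, {}  # swap `candidate` for HashMap(initial_block_size=4)
for fun, *args in operations:
    assert run_op(reference, fun, *args) == run_op(candidate, fun, *args)
print("all operations agree")
```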
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LDMTextToImagePipeline lowercase = TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } lowercase = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } lowercase = TEXT_TO_IMAGE_BATCH_PARAMS lowercase = False def _lowercase( self ) -> List[Any]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) UpperCAmelCase : str = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) UpperCAmelCase : str = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCAmelCase : Union[str, Any] = CLIPTextModel(A ) UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def _lowercase( self , A , A=0 ) -> Any: if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[Any] = torch.manual_seed(A ) else: UpperCAmelCase : Any = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> Tuple: UpperCAmelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : Any = self.get_dummy_components() UpperCAmelCase : int = LDMTextToImagePipeline(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : List[str] = pipe(**A ).images UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) UpperCAmelCase : Optional[int] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: super().tearDown() gc.collect() 
torch.cuda.empty_cache() def _lowercase( self , A , A=torch.floataa , A=0 ) -> Any: UpperCAmelCase : Optional[Any] = torch.manual_seed(A ) UpperCAmelCase : int = np.random.RandomState(A ).standard_normal((1, 4, 32, 32) ) UpperCAmelCase : List[Any] = torch.from_numpy(A ).to(device=A , dtype=A ) UpperCAmelCase : Any = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> Any: UpperCAmelCase : int = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_inputs(A ) UpperCAmelCase : List[str] = pipe(**A ).images UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) UpperCAmelCase : List[Any] = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] ) UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase( self , A , A=torch.floataa , A=0 ) -> Any: UpperCAmelCase : Union[str, Any] = torch.manual_seed(A ) UpperCAmelCase : Union[str, Any] = np.random.RandomState(A ).standard_normal((1, 4, 32, 32) ) UpperCAmelCase : List[Any] = torch.from_numpy(A ).to(device=A , dtype=A ) UpperCAmelCase : List[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> Any: UpperCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Dict = self.get_inputs(A ) UpperCAmelCase : Tuple = pipe(**A ).images[0] UpperCAmelCase : Optional[Any] = load_numpy( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" ) UpperCAmelCase : str = np.abs(expected_image - image ).max() assert max_diff < 1e-3
265
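The slow tests above get deterministic outputs by seeding a `torch.Generator` and feeding pre-built latents from a seeded NumPy RNG. A sketch of that reproducible-inference recipe outside the test class, assuming the `CompVis/ldm-text2im-large-256` checkpoint and enough memory to run it:

```python
import numpy as np
import torch
from diffusers import LDMTextToImagePipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = LDMTextToImagePipeline.from_pretrained(
    "CompVis/ldm-text2im-large-256"
).to(device)

# Fixed generator + fixed latents -> reproducible images, which is what lets
# the tests above compare pixel slices against stored reference values.
generator = torch.manual_seed(0)
latents = torch.from_numpy(
    np.random.RandomState(0).standard_normal((1, 4, 32, 32))
).to(device=device, dtype=torch.float32)

image = pipe(
    "A painting of a squirrel eating a burger",
    latents=latents,
    generator=generator,
    num_inference_steps=3,
    guidance_scale=6.0,
    output_type="numpy",
).images[0]
print(image.shape)  # (256, 256, 3): the 32x32 latent is decoded at 8x scale
```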
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
265
1
'''simple docstring'''
from math import factorial, radians


# NOTE: identifiers below are restored from the names the function body itself references;
# the function name and the two trailing parameter names are descriptive stand-ins, since
# the originals were obfuscated in this row.
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Reduce the angle to the range [0, 360) degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    # Maclaurin series for sin(x): x - x^3/3! + x^5/5! - ...
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
265
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
1
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase_ : def __init__( self , A , A=2 , A=3 , A=4 , A=2 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=36 , A=3 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=6 , A=6 , A=3 , A=4 , A=None , A=1000 , ) -> int: UpperCAmelCase : Tuple = parent UpperCAmelCase : List[Any] = batch_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : Optional[Any] = image_size UpperCAmelCase : Tuple = patch_size UpperCAmelCase : str = text_seq_length UpperCAmelCase : Union[str, Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : Optional[Any] = use_token_type_ids UpperCAmelCase : Any = use_labels UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : int = hidden_act UpperCAmelCase : Optional[Any] = hidden_dropout_prob UpperCAmelCase : List[str] = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Dict = type_vocab_size UpperCAmelCase : Union[str, Any] = type_sequence_label_size UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : List[str] = coordinate_size UpperCAmelCase : Tuple = shape_size UpperCAmelCase : int = num_labels UpperCAmelCase : List[str] = num_choices UpperCAmelCase : Optional[int] = scope UpperCAmelCase : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase : Optional[Any] = text_seq_length UpperCAmelCase : Dict = (image_size // patch_size) ** 2 + 1 UpperCAmelCase : Optional[Any] = self.text_seq_length + self.image_seq_length def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase : List[Any] = bbox[i, j, 3] UpperCAmelCase : List[Any] = bbox[i, j, 1] UpperCAmelCase : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase : Union[str, Any] = bbox[i, j, 2] UpperCAmelCase : int = bbox[i, j, 0] UpperCAmelCase : Optional[Any] = t UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) UpperCAmelCase : List[Any] = None if self.use_input_mask: UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase : int = None UpperCAmelCase : Any = None if self.use_labels: UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase : Tuple = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _lowercase( self , A , A , A , A , A , A , A , A ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = LayoutLMvaModel(config=A ) model.to(A ) model.eval() # text + image UpperCAmelCase : Tuple = model(A , pixel_values=A ) UpperCAmelCase : List[Any] = model( A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A ) UpperCAmelCase : List[Any] = model(A , bbox=A , pixel_values=A , token_type_ids=A ) UpperCAmelCase : Optional[Any] = model(A , bbox=A , pixel_values=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCAmelCase : str = model(A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase : str = model(pixel_values=A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A , A , A , A , A ) -> Optional[int]: UpperCAmelCase : Any = self.num_labels UpperCAmelCase : int = LayoutLMvaForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model( A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A , A , A , A , A ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.num_labels UpperCAmelCase : str = LayoutLMvaForTokenClassification(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model( A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _lowercase( self , A , A , A , A , A , A , A , A ) -> Optional[Any]: UpperCAmelCase : Union[str, Any] = LayoutLMvaForQuestionAnswering(config=A ) model.to(A ) model.eval() UpperCAmelCase : int = model( A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = False lowercase = False lowercase = False lowercase = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowercase = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def _lowercase( self , A , A , A , A , A ) -> Optional[Any]: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def _lowercase( self ) -> Dict: UpperCAmelCase : Optional[int] = LayoutLMvaModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self , A , A , A=False ) -> Any: UpperCAmelCase : Tuple = copy.deepcopy(A ) if model_class in get_values(A ): UpperCAmelCase : Optional[int] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(A , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(A ): UpperCAmelCase : Tuple = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=A ) elif model_class in get_values(A ): UpperCAmelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) UpperCAmelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) elif model_class in [ *get_values(A ), ]: UpperCAmelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) elif model_class in [ *get_values(A ), ]: UpperCAmelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=A , ) return inputs_dict def _lowercase( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase : Any = type self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> int: UpperCAmelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) @slow def _lowercase( self ) -> Optional[Any]: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : List[str] = LayoutLMvaModel.from_pretrained(A ) self.assertIsNotNone(A ) def __lowerCamelCase ( ) -> Any: UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class UpperCamelCase_ ( unittest.TestCase ): @cached_property def _lowercase( self ) -> int: return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None @slow def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(A ) UpperCAmelCase : int = self.default_image_processor UpperCAmelCase : Optional[Any] = prepare_img() UpperCAmelCase : Optional[Any] = image_processor(images=A , return_tensors="""pt""" ).pixel_values.to(A ) UpperCAmelCase : str = torch.tensor([[1, 2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass UpperCAmelCase : Tuple = model( input_ids=input_ids.to(A ) , bbox=bbox.to(A ) , pixel_values=pixel_values.to(A ) , ) # verify the logits UpperCAmelCase : Tuple = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , A ) UpperCAmelCase : Optional[Any] = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(A ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ) )
265
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


# NOTE: names are restored from the references inside this row's own code; local names such
# as `tensor`/`obj` and the `_mp_fn` entry point are conventional stand-ins for the
# obfuscated originals.
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
1
'''simple docstring'''
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# NOTE: module-level and local names are restored from the references inside this row's own
# code; the class-name strings in this set are kept exactly as they appear in the row.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
265
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = DiTPipeline lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase = False def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=A , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=A , ) UpperCAmelCase : Optional[int] = AutoencoderKL() UpperCAmelCase : int = DDIMScheduler() UpperCAmelCase : str = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def _lowercase( self , A , A=0 ) -> str: if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[Any] = torch.manual_seed(A ) else: UpperCAmelCase : Tuple = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Union[str, Any] = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> Dict: UpperCAmelCase : Any = """cpu""" UpperCAmelCase : List[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = self.get_dummy_inputs(A ) UpperCAmelCase : Any = pipe(**A ).images UpperCAmelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) UpperCAmelCase : int = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] ) UpperCAmelCase : int = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> List[str]: self._test_inference_batch_single_identical(relax_max_difference=A , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _lowercase( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Union[str, Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase( self ) -> List[Any]: UpperCAmelCase : Any = torch.manual_seed(0 ) UpperCAmelCase : List[str] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) UpperCAmelCase : Tuple = ["""vase""", """umbrella""", """white shark""", """white wolf"""] UpperCAmelCase : str = pipe.get_label_ids(A ) UpperCAmelCase : Optional[int] = pipe(A , generator=A , num_inference_steps=40 , 
output_type="""np""" ).images for word, image in zip(A , A ): UpperCAmelCase : Dict = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[Any] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) UpperCAmelCase : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) UpperCAmelCase : Dict = ["""vase""", """umbrella"""] UpperCAmelCase : Dict = pipe.get_label_ids(A ) UpperCAmelCase : List[Any] = torch.manual_seed(0 ) UpperCAmelCase : List[str] = pipe(A , generator=A , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(A , A ): UpperCAmelCase : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
265
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
1
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict: return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase="attention" ) -> Any: UpperCAmelCase : List[str] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) UpperCAmelCase : Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCAmelCase : str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) UpperCAmelCase : int = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCAmelCase : Optional[int] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) UpperCAmelCase : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCAmelCase : Dict = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) UpperCAmelCase : str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Dict: if split_mlp_wi: UpperCAmelCase : List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] UpperCAmelCase : List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] UpperCAmelCase : List[str] = (wi_a, wi_a) else: UpperCAmelCase : List[str] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] UpperCAmelCase : Tuple = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> int: return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def __lowerCamelCase ( _lowercase , *, _lowercase , _lowercase , _lowercase = False ) -> List[str]: UpperCAmelCase : str = traverse_util.flatten_dict(variables["""target"""] ) UpperCAmelCase : Dict = {"""/""".join(_lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase : Any = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , _lowercase ) UpperCAmelCase : int = collections.OrderedDict() # Shared embeddings. UpperCAmelCase : Any = old["""token_embedder/embedding"""] # Encoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). UpperCAmelCase : str = tax_layer_norm_lookup(_lowercase , _lowercase , """encoder""" , """pre_attention_layer_norm""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = tax_attention_lookup(_lowercase , _lowercase , """encoder""" , """attention""" ) UpperCAmelCase : Optional[Any] = layer_norm UpperCAmelCase : Union[str, Any] = k.T UpperCAmelCase : Union[str, Any] = o.T UpperCAmelCase : Tuple = q.T UpperCAmelCase : str = v.T # Block i, layer 1 (MLP). 
UpperCAmelCase : Optional[int] = tax_layer_norm_lookup(_lowercase , _lowercase , """encoder""" , """pre_mlp_layer_norm""" ) UpperCAmelCase , UpperCAmelCase : List[Any] = tax_mlp_lookup(_lowercase , _lowercase , """encoder""" , _lowercase ) UpperCAmelCase : Any = layer_norm if split_mlp_wi: UpperCAmelCase : Tuple = wi[0].T UpperCAmelCase : List[Any] = wi[1].T else: UpperCAmelCase : List[Any] = wi.T UpperCAmelCase : Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase : Union[str, Any] = tax_relpos_bias_lookup( _lowercase , _lowercase , """encoder""" ).T UpperCAmelCase : List[Any] = old["""encoder/encoder_norm/scale"""] if not scalable_attention: UpperCAmelCase : Optional[int] = tax_relpos_bias_lookup( _lowercase , 0 , """encoder""" ).T UpperCAmelCase : List[Any] = tax_relpos_bias_lookup( _lowercase , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(_lowercase ): # Block i, layer 0 (Self Attention). UpperCAmelCase : List[Any] = tax_layer_norm_lookup(_lowercase , _lowercase , """decoder""" , """pre_self_attention_layer_norm""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = tax_attention_lookup(_lowercase , _lowercase , """decoder""" , """self_attention""" ) UpperCAmelCase : List[Any] = layer_norm UpperCAmelCase : str = k.T UpperCAmelCase : Dict = o.T UpperCAmelCase : str = q.T UpperCAmelCase : Optional[int] = v.T # Block i, layer 1 (Cross Attention). UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , """decoder""" , """pre_cross_attention_layer_norm""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = tax_attention_lookup(_lowercase , _lowercase , """decoder""" , """encoder_decoder_attention""" ) UpperCAmelCase : Optional[int] = layer_norm UpperCAmelCase : Union[str, Any] = k.T UpperCAmelCase : Tuple = o.T UpperCAmelCase : int = q.T UpperCAmelCase : Optional[int] = v.T # Block i, layer 2 (MLP). UpperCAmelCase : Any = tax_layer_norm_lookup(_lowercase , _lowercase , """decoder""" , """pre_mlp_layer_norm""" ) UpperCAmelCase , UpperCAmelCase : List[str] = tax_mlp_lookup(_lowercase , _lowercase , """decoder""" , _lowercase ) UpperCAmelCase : List[str] = layer_norm if split_mlp_wi: UpperCAmelCase : int = wi[0].T UpperCAmelCase : Optional[int] = wi[1].T else: UpperCAmelCase : Union[str, Any] = wi.T UpperCAmelCase : Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase : Tuple = tax_relpos_bias_lookup(_lowercase , _lowercase , """decoder""" ).T UpperCAmelCase : int = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase : Dict = old["""decoder/logits_dense/kernel"""].T return new def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase : Union[str, Any] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase : Optional[int] = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) UpperCAmelCase : Any = state_dict["""shared.weight"""] return state_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any: UpperCAmelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(_lowercase ) UpperCAmelCase : Optional[int] = convert_tax_to_pytorch( _lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase , scalable_attention=_lowercase ) UpperCAmelCase : str = make_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase , strict=_lowercase ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase = False , _lowercase = False , ) -> Optional[int]: UpperCAmelCase : Optional[Any] = MTaConfig.from_json_file(_lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase : Optional[int] = UMTaEncoderModel(_lowercase ) else: UpperCAmelCase : Dict = UMTaForConditionalGeneration(_lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(_lowercase ) print("""Done""" ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) a : List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
265
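A minimal invocation sketch for the T5X-to-PyTorch conversion script above. The flag names come straight from its argparse section; the script filename and all paths are hypothetical stand-ins.

import subprocess

# Hypothetical filename and paths; only the flags are taken from the script above.
subprocess.run(
    [
        "python",
        "convert_t5x_checkpoint_to_pytorch.py",  # assumed name for the script
        "--t5x_checkpoint_path", "/checkpoints/umt5_small/checkpoint_1000000",
        "--config_file", "config.json",
        "--pytorch_dump_path", "./umt5-small-pt",
        "--scalable_attention",  # set for UMT5-style scaled attention
    ],
    check=True,
)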
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
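The ContextManagers helper exercised in the test file above can be approximated with contextlib.ExitStack. This is a behavioral sketch, not the transformers implementation: managers are entered in list order and exited in reverse, which is exactly what the nested welcome/goodbye assertions check.

import contextlib

class ContextManagersSketch:
    """Enter a list of context managers in order; exit them in reverse order."""

    def __init__(self, managers):
        self.managers = managers
        self.stack = contextlib.ExitStack()

    def __enter__(self):
        for manager in self.managers:
            self.stack.enter_context(manager)
        return self

    def __exit__(self, *exc_info):
        return self.stack.__exit__(*exc_info)

@contextlib.contextmanager
def tagged(name):
    print(f"enter {name}")
    yield
    print(f"exit {name}")

with ContextManagersSketch([tagged("fr"), tagged("en")]):
    print("body")
# prints: enter fr / enter en / body / exit en / exit fr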
1
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path a : str = """src/transformers""" # Matches is_xxx_available() a : Tuple = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} a : int = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a : List[str] = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available a : Optional[int] = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") a : Dict = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a : Union[str, Any] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", a : Any = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], a : List[Any] = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo a : Union[str, Any] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: a : Union[str, Any] = re.compile(R"""^\s*try:""") # Catches a line with else: a : Union[str, Any] = re.compile(R"""^\s*else:""") def __lowerCamelCase ( _lowercase ) -> Dict: if _re_test_backend.search(_lowercase ) is None: return None UpperCAmelCase : Optional[Any] = [b[0] for b in _re_backend.findall(_lowercase )] backends.sort() return "_and_".join(_lowercase ) def __lowerCamelCase ( _lowercase ) -> str: with open(_lowercase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: UpperCAmelCase : Optional[int] = f.readlines() UpperCAmelCase : str = 0 while line_index < len(_lowercase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_lowercase ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase : Tuple = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: UpperCAmelCase : int = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_lowercase ): UpperCAmelCase : Any = _re_one_line_import_struct.search(_lowercase ).groups()[0] UpperCAmelCase : int = re.findall("""\[([^\]]+)\]""" , _lowercase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue UpperCAmelCase : Union[str, Any] = _re_import_struct_key_value.search(_lowercase ) if single_line_import_search is not None: UpperCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_lowercase ) > 0] objects.extend(_lowercase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase : str = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase : List[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): UpperCAmelCase : List[Any] = lines[line_index] if _re_import_struct_add_one.search(_lowercase ) is not None: objects.append(_re_import_struct_add_one.search(_lowercase ).groups()[0] ) elif _re_import_struct_add_many.search(_lowercase ) is not None: UpperCAmelCase : Optional[int] = _re_import_struct_add_many.search(_lowercase ).groups()[0].split(""", """ ) UpperCAmelCase : int = [obj[1:-1] for obj in imports if len(_lowercase ) > 0] objects.extend(_lowercase ) elif _re_between_brackets.search(_lowercase ) is not None: UpperCAmelCase : Tuple = _re_between_brackets.search(_lowercase ).groups()[0].split(""", """ ) UpperCAmelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(_lowercase ) > 0] objects.extend(_lowercase ) elif _re_quote_object.search(_lowercase ) is not None: objects.append(_re_quote_object.search(_lowercase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 1_2 + """\"""" ): objects.append(line[1_3:-3] ) line_index += 1 UpperCAmelCase : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase : Dict = [] while ( line_index < len(_lowercase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): UpperCAmelCase : List[str] = lines[line_index] UpperCAmelCase : str = _re_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(_lowercase ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase : int = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase : List[str] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase : List[str] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): UpperCAmelCase : Tuple = lines[line_index] UpperCAmelCase : Union[str, Any] = _re_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCAmelCase : Any = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCamelCase ( _lowercase , _lowercase ) -> Union[str, Any]: def find_duplicates(_lowercase ): return [k for k, v in collections.Counter(_lowercase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase : List[Any] = [] for key in import_dict_objects.keys(): UpperCAmelCase : Optional[int] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) UpperCAmelCase : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase : Dict = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def __lowerCamelCase ( ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = [] for root, _, files in os.walk(_lowercase ): if "__init__.py" in files: UpperCAmelCase : Tuple = os.path.join(_lowercase , """__init__.py""" ) UpperCAmelCase : str = parse_init(_lowercase ) if objects is not None: UpperCAmelCase : Tuple = analyze_results(*_lowercase ) if len(_lowercase ) > 0: UpperCAmelCase : List[Any] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(_lowercase ) ) if len(_lowercase ) > 0: raise ValueError("""\n\n""".join(_lowercase ) ) def __lowerCamelCase ( ) -> Any: UpperCAmelCase : int = [] for path, directories, files in os.walk(_lowercase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(_lowercase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_lowercase ) / folder).glob("""*.py""" ) ) ) == 0: continue UpperCAmelCase : Tuple = str((Path(_lowercase ) / folder).relative_to(_lowercase ) ) UpperCAmelCase : Union[str, Any] = short_path.replace(os.path.sep , """.""" ) submodules.append(_lowercase ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase : Any = str((Path(_lowercase ) / fname).relative_to(_lowercase ) ) 
UpperCAmelCase : Optional[int] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(_lowercase ) return submodules a : Tuple = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def __lowerCamelCase ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase : Tuple = importlib.util.spec_from_file_location( """transformers""" , os.path.join(_lowercase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) UpperCAmelCase : Union[str, Any] = spec.loader.load_module() UpperCAmelCase : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_lowercase ) > 0: UpperCAmelCase : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
265
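A standalone sketch of the backend-detection step used by the init parser above: given a guard line such as `if not is_torch_available():`, extract the backend name. The source additionally sorts and joins multiple backend names with "_and_"; this simplified version handles the single-backend case only.

import re

# Mirrors the _re_test_backend / _re_backend pair defined at the top of the script.
_re_backend_guard = re.compile(r"^\s*if\s+not\s+is\_([a-z_]*)\_available\(\)")

def find_backend(line: str):
    """Return the backend guarded by an `if not is_xxx_available():` line, else None."""
    match = _re_backend_guard.search(line)
    return match.group(1) if match else None

assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("try:") is None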
'''simple docstring''' from itertools import count def __lowerCamelCase ( _lowercase = 5_0 ) -> int: UpperCAmelCase : Any = [1] * min_block_length for n in count(_lowercase ): fill_count_functions.append(1 ) for block_length in range(_lowercase , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_0_0_0_0_0_0: break return n if __name__ == "__main__": print(F'''{solution() = }''')
265
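The style-context solution above is the classic Project Euler 114/115 fill-count: count rows built from red unit squares and black blocks at least min_block_length units long, with adjacent blocks separated by at least one red square. An independent sketch of the same count, checked against the value F(7) = 17 stated in Problem 114 for blocks of length three or more:

def fill_count(total_length: int, min_block_length: int = 3) -> int:
    # f[n] counts valid rows of length n.
    f = [1] * min_block_length  # lengths 0 .. min_block_length - 1: the all-red row only
    for n in range(min_block_length, total_length + 1):
        ways = f[n - 1]  # case 1: the last unit is a red square
        for length in range(min_block_length, n + 1):
            # case 2: a block of `length` ends at position n, preceded by a
            # red square (or by the start of the row when length == n)
            ways += 1 if length == n else f[n - length - 1]
        f.append(ways)
    return f[total_length]

assert fill_count(7) == 17  # worked value from the Problem 114 statement
print(fill_count(50))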
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[int] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
265
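The init above registers its objects with transformers' _LazyModule so the heavy torch imports only happen on first attribute access. A minimal sketch of that idea (not the real implementation), mapping attribute names to standard-library modules for demonstration:

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    """Resolve attributes by importing their defining submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = LazyModuleSketch("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # math is imported here, on first use; prints 3.0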
'''simple docstring''' from __future__ import annotations import math class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : Optional[int] = size # approximate the overall size of segment tree with given value UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )] UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def _lowercase( self , A ) -> int: return idx * 2 def _lowercase( self , A ) -> int: return idx * 2 + 1 def _lowercase( self , A , A , A , A ) -> None: if left_element == right_element: UpperCAmelCase : str = a[left_element - 1] else: UpperCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(A ) , A , A , A ) self.build(self.right(A ) , mid + 1 , A , A ) UpperCAmelCase : str = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) def _lowercase( self , A , A , A , A , A , A ) -> bool: if self.flag[idx] is True: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : int = False if left_element != right_element: UpperCAmelCase : List[str] = self.lazy[idx] UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase : Optional[Any] = val if left_element != right_element: UpperCAmelCase : Tuple = val UpperCAmelCase : int = val UpperCAmelCase : Any = True UpperCAmelCase : str = True return True UpperCAmelCase : str = (left_element + right_element) // 2 self.update(self.left(A ) , A , A , A , A , A ) self.update(self.right(A ) , mid + 1 , A , A , A , A ) UpperCAmelCase : List[str] = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) return True def _lowercase( self , A , A , A , A , A ) -> int | float: if self.flag[idx] is True: UpperCAmelCase : Any = self.lazy[idx] UpperCAmelCase : Any = False if left_element != right_element: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : Tuple = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase : Dict = (left_element + right_element) // 2 UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A ) UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A ) return max(A , A ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a : Optional[Any] = 1_5 a : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
265
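A naive reference model for the lazy segment tree above, handy for cross-checking its range-assign / range-max behavior on small arrays. Indices are 1-based and inclusive to match the demo at the bottom of that file.

class NaiveRangeMax:
    """O(n)-per-operation reference: assign on [left, right], max over [left, right]."""

    def __init__(self, data):
        self.data = list(data)

    def update(self, left: int, right: int, value: int) -> None:
        for i in range(left - 1, right):
            self.data[i] = value

    def query(self, left: int, right: int) -> int:
        return max(self.data[left - 1 : right])

naive = NaiveRangeMax([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(naive.query(4, 6))   # 7
print(naive.query(7, 11))  # 14
naive.update(1, 3, 111)
print(naive.query(1, 15))  # 111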
1
'''simple docstring''' import numpy as np def __lowerCamelCase ( _lowercase ) -> np.array: return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
265
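The activation above is an algebraic rewrite of the hyperbolic tangent, tanh(x) = 2 / (1 + e^(-2x)) - 1, so it can be verified directly against np.tanh:

import numpy as np

def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1

x = np.linspace(-5.0, 5.0, 101)
assert np.allclose(tangent_hyperbolic(x), np.tanh(x))
print("matches np.tanh")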
'''simple docstring''' from PIL import Image def __lowerCamelCase ( _lowercase , _lowercase ) -> Image: def brightness(_lowercase ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(_lowercase ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 a : Optional[Any] = change_brightness(img, 1_0_0) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
265
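Since 128 + level + (c - 128) simplifies to c + level, the filter above is a plain additive brightness shift; Pillow's point() builds a per-value lookup table and, for 8-bit modes, clamps out-of-range results to 0..255. A self-contained check on a synthetic grayscale image:

from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    return img.point(lambda c: 128 + level + (c - 128))

gray = Image.new("L", (2, 2), color=100)
print(change_brightness(gray, 50).getpixel((0, 0)))    # 150, i.e. 100 + 50
print(change_brightness(gray, -200).getpixel((0, 0)))  # clamped to 0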
1
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowerCamelCase ( _lowercase ) -> Optional[int]: random.seed(_lowercase ) np.random.seed(_lowercase ) torch.manual_seed(_lowercase ) torch.cuda.manual_seed_all(_lowercase ) # ^^ safe to call this function even if cuda is not available class UpperCamelCase_ : def __init__( self , A , A = 0.9_9_9_9 , A = 0.0 , A = 0 , A = False , A = 1.0 , A = 2 / 3 , A = None , A = None , **A , ) -> Optional[Any]: if isinstance(A , torch.nn.Module ): UpperCAmelCase : Union[str, Any] = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , A , standard_warn=A , ) UpperCAmelCase : Union[str, Any] = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility UpperCAmelCase : Union[str, Any] = True if kwargs.get("""max_value""" , A ) is not None: UpperCAmelCase : List[str] = """The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate("""max_value""" , """1.0.0""" , A , standard_warn=A ) UpperCAmelCase : Optional[Any] = kwargs["""max_value"""] if kwargs.get("""min_value""" , A ) is not None: UpperCAmelCase : Tuple = """The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate("""min_value""" , """1.0.0""" , A , standard_warn=A ) UpperCAmelCase : Union[str, Any] = kwargs["""min_value"""] UpperCAmelCase : List[str] = list(A ) UpperCAmelCase : Union[str, Any] = [p.clone().detach() for p in parameters] if kwargs.get("""device""" , A ) is not None: UpperCAmelCase : Tuple = """The `device` argument is deprecated. 
Please use `to` instead.""" deprecate("""device""" , """1.0.0""" , A , standard_warn=A ) self.to(device=kwargs["""device"""] ) UpperCAmelCase : Union[str, Any] = None UpperCAmelCase : Tuple = decay UpperCAmelCase : str = min_decay UpperCAmelCase : str = update_after_step UpperCAmelCase : Dict = use_ema_warmup UpperCAmelCase : Union[str, Any] = inv_gamma UpperCAmelCase : str = power UpperCAmelCase : List[Any] = 0 UpperCAmelCase : int = None # set in `step()` UpperCAmelCase : Optional[int] = model_cls UpperCAmelCase : Any = model_config @classmethod def _lowercase( cls , A , A ) -> "EMAModel": UpperCAmelCase , UpperCAmelCase : Optional[int] = model_cls.load_config(A , return_unused_kwargs=A ) UpperCAmelCase : List[str] = model_cls.from_pretrained(A ) UpperCAmelCase : Any = cls(model.parameters() , model_cls=A , model_config=model.config ) ema_model.load_state_dict(A ) return ema_model def _lowercase( self , A ) -> Dict: if self.model_cls is None: raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" ) if self.model_config is None: raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" ) UpperCAmelCase : List[str] = self.model_cls.from_config(self.model_config ) UpperCAmelCase : Optional[Any] = self.state_dict() state_dict.pop("""shadow_params""" , A ) model.register_to_config(**A ) self.copy_to(model.parameters() ) model.save_pretrained(A ) def _lowercase( self , A ) -> float: UpperCAmelCase : Optional[int] = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: UpperCAmelCase : List[str] = 1 - (1 + step / self.inv_gamma) ** -self.power else: UpperCAmelCase : Tuple = (1 + step) / (10 + step) UpperCAmelCase : Optional[int] = min(A , self.decay ) # make sure decay is not smaller than min_decay UpperCAmelCase : Union[str, Any] = max(A , self.min_decay ) return cur_decay_value @torch.no_grad() def _lowercase( self , A ) -> List[Any]: if isinstance(A , torch.nn.Module ): UpperCAmelCase : Optional[int] = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , A , standard_warn=A , ) UpperCAmelCase : Dict = parameters.parameters() UpperCAmelCase : Optional[int] = list(A ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
UpperCAmelCase : Union[str, Any] = self.get_decay(self.optimization_step ) UpperCAmelCase : Dict = decay UpperCAmelCase : List[Any] = 1 - decay UpperCAmelCase : Tuple = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , A ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): UpperCAmelCase : List[Any] = deepspeed.zero.GatheredParameters(A , modifier_rank=A ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(A ) def _lowercase( self , A ) -> None: UpperCAmelCase : List[Any] = list(A ) for s_param, param in zip(self.shadow_params , A ): param.data.copy_(s_param.to(param.device ).data ) def _lowercase( self , A=None , A=None ) -> None: UpperCAmelCase : List[str] = [ p.to(device=A , dtype=A ) if p.is_floating_point() else p.to(device=A ) for p in self.shadow_params ] def _lowercase( self ) -> dict: return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def _lowercase( self , A ) -> None: UpperCAmelCase : Dict = [param.detach().cpu().clone() for param in parameters] def _lowercase( self , A ) -> None: if self.temp_stored_params is None: raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" ) for c_param, param in zip(self.temp_stored_params , A ): param.data.copy_(c_param.data ) # Better memory-wise. UpperCAmelCase : Optional[Any] = None def _lowercase( self , A ) -> None: UpperCAmelCase : Tuple = copy.deepcopy(A ) UpperCAmelCase : Optional[Any] = state_dict.get("""decay""" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("""Decay must be between 0 and 1""" ) UpperCAmelCase : Tuple = state_dict.get("""min_decay""" , self.min_decay ) if not isinstance(self.min_decay , A ): raise ValueError("""Invalid min_decay""" ) UpperCAmelCase : List[Any] = state_dict.get("""optimization_step""" , self.optimization_step ) if not isinstance(self.optimization_step , A ): raise ValueError("""Invalid optimization_step""" ) UpperCAmelCase : str = state_dict.get("""update_after_step""" , self.update_after_step ) if not isinstance(self.update_after_step , A ): raise ValueError("""Invalid update_after_step""" ) UpperCAmelCase : Dict = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , A ): raise ValueError("""Invalid use_ema_warmup""" ) UpperCAmelCase : str = state_dict.get("""inv_gamma""" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("""Invalid inv_gamma""" ) UpperCAmelCase : List[str] = state_dict.get("""power""" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("""Invalid power""" ) UpperCAmelCase : Optional[int] = state_dict.get("""shadow_params""" , A ) if shadow_params is not None: UpperCAmelCase : Optional[Any] = shadow_params if not isinstance(self.shadow_params , A ): raise ValueError("""shadow_params must be a list""" ) if not all(isinstance(A , torch.Tensor ) for p in self.shadow_params ): raise ValueError("""shadow_params must all be Tensors""" )
265
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
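A smoke-test sketch matching the tiny tester configuration above (vocab 99, hidden 64, 5 layers, 4 heads, intermediate 37): build a randomly initialized GPTNeoXModel and check the output shape. No pretrained weights are involved.

import torch
from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    vocab_size=99,
    hidden_size=64,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    max_position_embeddings=512,
)
model = GPTNeoXModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    output = model(input_ids)
print(output.last_hidden_state.shape)  # torch.Size([2, 7, 64])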
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : Tuple = logging.get_logger(__name__) a : Optional[int] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class UpperCamelCase_ ( __magic_name__ ): lowercase = 'megatron-bert' def __init__( self , A=29056 , A=1024 , A=24 , A=16 , A=4096 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-12 , A=0 , A="absolute" , A=True , **A , ) -> Optional[int]: super().__init__(pad_token_id=A , **A ) UpperCAmelCase : Tuple = vocab_size UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : str = intermediate_size UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = max_position_embeddings UpperCAmelCase : Union[str, Any] = type_vocab_size UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Optional[int] = layer_norm_eps UpperCAmelCase : Any = position_embedding_type UpperCAmelCase : str = use_cache
265
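For reference, the configuration above can be instantiated directly from transformers; the defaults mirror the __init__ signature (29056-token vocab, hidden size 1024, 24 layers, 16 heads). A scaled-down config keeps a random-init model small:

from transformers import MegatronBertConfig, MegatronBertModel

default_config = MegatronBertConfig()
print(default_config.hidden_size, default_config.num_hidden_layers)  # 1024 24

tiny = MegatronBertConfig(
    hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256
)
model = MegatronBertModel(tiny)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the tiny model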
'''simple docstring''' def __lowerCamelCase ( _lowercase , _lowercase ) -> int: if b == 0: return 1 if (b % 2) == 0: return actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) ) else: return a * actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) ) def __lowerCamelCase ( _lowercase , _lowercase ) -> float: if b < 0: return 1 / actual_power(_lowercase , _lowercase ) return actual_power(_lowercase , _lowercase ) if __name__ == "__main__": print(power(-2, -3))
265
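Worth noting: the recursive helper above computes the half-power twice per call, so the recursion still performs O(b) multiplications overall; computing the half-power once and squaring it restores the O(log b) behavior the even/odd split is aiming for. A corrected sketch:

def fast_power(base: float, exponent: int) -> float:
    """Exponentiation by squaring with O(log |exponent|) multiplications."""
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)  # computed once, reused twice
    return half * half if exponent % 2 == 0 else base * half * half

assert fast_power(-2, -3) == -0.125  # same result as the print in the file above
assert fast_power(3, 5) == 243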
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase_ ( __magic_name__ ): lowercase = ['image_processor', 'tokenizer'] lowercase = 'ViltImageProcessor' lowercase = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , A=None , A=None , **A ) -> Dict: UpperCAmelCase : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , A , ) UpperCAmelCase : Any = kwargs.pop("""feature_extractor""" ) UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A , A ) UpperCAmelCase : Union[str, Any] = self.image_processor def __call__( self , A , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding: UpperCAmelCase : Union[str, Any] = self.tokenizer( text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , ) # add pixel_values + pixel_mask UpperCAmelCase : int = self.image_processor(A , return_tensors=A ) encoding.update(A ) return encoding def _lowercase( self , *A , **A ) -> Dict: return self.tokenizer.batch_decode(*A , **A ) def _lowercase( self , *A , **A ) -> List[str]: return self.tokenizer.decode(*A , **A ) @property def _lowercase( self ) -> List[Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _lowercase( self ) -> Dict: warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A , ) return self.image_processor_class @property def _lowercase( self ) -> List[str]: warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A , ) return self.image_processor
265
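A typical call pattern for the processor above, assuming the public dandelin/vilt-b32-finetuned-vqa checkpoint. As the __call__ body shows, the processor tokenizes the text and then adds pixel_values plus pixel_mask for the image to the same BatchEncoding.

from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384), color="white")  # stand-in for a real photo
encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))
# ['attention_mask', 'input_ids', 'pixel_mask', 'pixel_values', 'token_type_ids']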
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
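The hard-coded ids in the integration test above come from the albert-base-v2 SentencePiece vocabulary. A short usage sketch (requires the sentencepiece package); the tokenizer lowercases by default, which is why "This" becomes "▁this":

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("This is a test"))  # ['▁this', '▁is', '▁a', '▁test']
ids = tokenizer.encode("sequence builders")
print(ids[0], ids[-1])  # 2 3  (the [CLS] and [SEP] ids seen in the test data above)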
1
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup a : Union[str, Any] = [ """kernels/rwkv/wkv_cuda.cu""", """kernels/rwkv/wkv_op.cpp""", """kernels/deformable_detr/ms_deform_attn.h""", """kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""", """models/graphormer/algos_graphormer.pyx""", ] def __lowerCamelCase ( _lowercase ) -> int: # Test all the extensions added in the setup for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": a : str = argparse.ArgumentParser() parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""") a : Optional[Any] = parser.parse_args() if args.check_lib: a : Optional[Any] = importlib.import_module("""transformers""") a : Tuple = Path(transformers_module.__file__).parent else: a : Dict = Path.cwd() / """build/lib/transformers""" if not test_custom_files_are_present(transformers_path): raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
265
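A usage note for the build check above; the script lives in the transformers repo's utils folder (the exact path used here is an assumption) and is run once against the installed library and once against a built release tree:

import subprocess

# Check the importable `transformers` package (assumed script path).
subprocess.run(["python", "utils/check_build.py", "--check_lib"], check=True)
# Check build/lib/transformers produced by a setup.py build.
subprocess.run(["python", "utils/check_build.py"], check=True)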
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
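The slow tests above exercise the public three-stage DiffEdit API: mask generation, inversion, then masked generation. A condensed sketch of that flow, using the same checkpoint, image, and prompts as the tests; the scheduler swap and `inpaint_strength` mirror the test values, everything else is a reasonable default:

```python
# Minimal sketch of the DiffEdit flow the tests above cover.
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
)
image = raw.convert("RGB").resize((768, 768))

# 1) where do the prompts disagree? 2) invert the image, 3) regenerate under the mask
mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
result = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
).images[0]
```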
265
1
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
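As a quick sanity check of the formula (my addition, not part of the original file): when the densities sum to exactly one the curvature term vanishes, and at redshift zero E(z) = 1, so the function should return the Hubble constant unchanged:

```python
# Illustrative check: densities sum to one -> curvature = 0, and z = 0 -> E(z) = 1.
h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-9
```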
265
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
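To make the key-renaming rules concrete, a toy call is sketched below, assuming the helper above under its call-site name `rename_fairseq_keys`. The input keys are invented for illustration, and the expected outputs simply follow the replace rules in the function:

```python
# Invented fairseq-style keys, purely for illustration.
toy_state = {
    "layers.0.moe_layer.experts.0.fc1.weight": 0,
    "layers.0.moe_layer.gate.wg.weight": 1,
    "layers.1.encoder_attn.k_proj.weight": 2,
}
renamed = rename_fairseq_keys(toy_state, expert_idx=3)
# Expected keys, per the rules above:
#   layers.0.ffn.experts.expert_3.fc1.weight   (expert block renamed; fc1 kept since "experts" is in the key)
#   layers.0.ffn.router.classifier.weight      (gate -> router classifier)
#   layers.1.cross_attention.k_proj.weight     (encoder_attn -> cross_attention)
print(sorted(renamed))
```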
265
1
'''simple docstring'''
def rank_of_matrix(matrix) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (NOTE: reassigning the loop variable has no effect on Python's for loop)
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
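A small worked example (illustrative): the second row below is a multiple of the first, so the rank drops to two:

```python
# Illustrative: a singular 3x3 matrix has rank 2.
m = [
    [1.0, 2.0, 3.0],
    [2.0, 4.0, 6.0],  # 2 x row 0 -> linearly dependent
    [1.0, 0.0, 1.0],
]
print(rank_of_matrix(m))  # 2
```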
265
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
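The conversion above relies on a simple pop-and-reinsert helper, invoked as `rename_key` at the call site. A toy illustration of that pattern, with an invented key:

```python
# Illustrative only: rename_key pops the old key and stores its value under the new one.
toy = {"patch_embed.proj.weight": "W"}
rename_key(toy, "patch_embed.proj.weight", "swiftformer.patch_embed.patch_embedding.proj.weight")
print(toy)  # {'swiftformer.patch_embed.patch_embedding.proj.weight': 'W'}
```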
265
1
'''simple docstring'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
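A minimal sketch of a filter satisfying the protocol above: a pass-through filter, whose impulse response gives a flat 0 dB magnitude and zero phase. The 48 kHz sample rate is an assumption for the demo:

```python
# Illustrative pass-through "filter"; structural typing satisfies FilterType.
class Identity:
    def process(self, sample: float) -> float:
        return sample


show_frequency_response(Identity(), 48_000)  # flat 0 dB line
show_phase_response(Identity(), 48_000)      # zero phase shift
```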
265
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> Any: raise RuntimeError("""CUDA out of memory.""" ) class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Any: super().__init__() UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) UpperCAmelCase : Tuple = nn.BatchNormad(4 ) UpperCAmelCase : int = nn.Linear(4 , 5 ) def _lowercase( self , A ) -> Any: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A , [128, 64, 32, 16, 8] ) def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" ) self.assertListEqual(A , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _lowercase( self ) -> Any: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A , A ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _lowercase( self ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated() UpperCAmelCase : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , A ) UpperCAmelCase : Tuple = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() , A )
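Outside the test harness, the decorator under test is used like the minimal sketch below; the memory limit is simulated by raising the same OOM-style error the tests fake:

```python
# Minimal sketch: retry with a halved batch size whenever an OOM-style error escapes.
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size


print(train())  # settles on 16
```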
265
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
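For reference, a minimal sketch of instantiating this configuration through the public transformers API:

```python
# Illustrative: defaults plus one override; model_type is "roc_bert".
from transformers import RoCBertConfig

config = RoCBertConfig(hidden_size=512)
print(config.model_type, config.hidden_size, config.pronunciation_vocab_size)
```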
265
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
265
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
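A small sketch of the `add_prefix_space` behaviour probed by the offset tests above; the exact token strings in the comments are what byte-level BPE is expected to produce, stated here as an assumption:

```python
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
tok_prefix = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True
)
print(tok.tokenize("Hello world"))         # e.g. ['Hello', 'Ġworld']
print(tok_prefix.tokenize("Hello world"))  # e.g. ['ĠHello', 'Ġworld']
```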
265
1
'''simple docstring'''
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
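And a one-line illustration of the function above:

```python
print(pancake_sort([3, 1, 4, 1, 5, 9, 2, 6]))  # [1, 1, 2, 3, 4, 5, 6, 9]
```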
265
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
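# --- Illustrative usage sketch (not part of the test file above) -----------------
# A minimal example of the zero-shot-classification pipeline these tests exercise.
# The checkpoint is an assumption for illustration; any NLI model works, e.g.
# facebook/bart-large-mnli. Note the hypothesis_template must contain a "{}" slot,
# which is exactly what the ValueError assertions at the top of the file check.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# `result` holds "sequence", "labels" sorted by descending score, and "scores".
print(result["labels"][0], result["scores"][0])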
265
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class UpperCamelCase_ : lowercase = BlenderbotSmallConfig lowercase = {} lowercase = 'gelu' def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=20 , A=2 , A=1 , A=0 , ) -> str: UpperCAmelCase : str = parent UpperCAmelCase : Any = batch_size UpperCAmelCase : List[str] = seq_length UpperCAmelCase : Any = is_training UpperCAmelCase : Any = use_labels UpperCAmelCase : Union[str, Any] = vocab_size UpperCAmelCase : Tuple = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : List[str] = intermediate_size UpperCAmelCase : Union[str, Any] = hidden_dropout_prob UpperCAmelCase : Tuple = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = max_position_embeddings UpperCAmelCase : List[str] = eos_token_id UpperCAmelCase : Optional[int] = pad_token_id UpperCAmelCase : str = bos_token_id def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : Optional[Any] = prepare_blenderbot_small_inputs_dict(A , A , A ) return config, inputs_dict def _lowercase( self , A , A ) -> Tuple: UpperCAmelCase : List[str] = TFBlenderbotSmallModel(config=A ).get_decoder() UpperCAmelCase : Dict = inputs_dict["""input_ids"""] UpperCAmelCase : Union[str, Any] = input_ids[:1, :] UpperCAmelCase : List[Any] = inputs_dict["""attention_mask"""][:1, :] UpperCAmelCase : int = inputs_dict["""head_mask"""] UpperCAmelCase : int = 1 # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , head_mask=A , use_cache=A ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and 
UpperCAmelCase : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCAmelCase : List[str] = model(A , attention_mask=A )[0] UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , past_key_values=A )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A , A , rtol=1e-3 ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> Dict: if attention_mask is None: UpperCAmelCase : int = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : Union[str, Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) lowercase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () lowercase = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) lowercase = True lowercase = False lowercase = False def _lowercase( self ) -> Dict: UpperCAmelCase : List[Any] = TFBlenderbotSmallModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A ) def _lowercase( self ) -> Any: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A ) @require_tokenizers @require_tf class UpperCamelCase_ ( unittest.TestCase ): lowercase = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] lowercase = 'facebook/blenderbot_small-90M' @cached_property def _lowercase( self ) -> Optional[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = self.tokenizer(self.src_text , return_tensors="""tf""" ) UpperCAmelCase : Tuple = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A , ) UpperCAmelCase : str = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
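# --- Illustrative usage sketch (not part of the test file above) -----------------
# How the slow integration test drives generation, as a standalone script. Note the
# quirk the test works around: the *tokenizer* is loaded from facebook/blenderbot-90M
# while the model weights come from facebook/blenderbot_small-90M.
from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
inputs = tokenizer(["Do you have anxiety?"], return_tensors="tf")
generated = model.generate(
    inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2
)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])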
265
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
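# --- Illustrative usage sketch (not part of the conftest above) ------------------
# A hypothetical test showing how these fixtures compose. Fixture names below are
# assumptions (the originals were rewritten to placeholders), but the pattern is:
# the temporary-repo context manager yields the repo id and guarantees deletion
# via the cleanup fixture even if the test body raises.
def test_upload_roundtrip(temporary_repo, hf_api, hf_token):  # hypothetical fixture names
    repo_id = f"{CI_HUB_USER}/tmp-demo-repo"  # CI_HUB_USER is defined in this conftest
    with temporary_repo(repo_id):
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
        # ... exercise the repo here; cleanup happens on context exit ...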
265
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : Any = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase , _lowercase=False , _lowercase=False , _lowercase=False ) -> int: UpperCAmelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') ) # embeddings rename_keys.extend( [ # text embeddings ("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""), ( """text_embeddings.position_embeddings.weight""", """vilt.embeddings.text_embeddings.position_embeddings.weight""", ), ("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""), ( """text_embeddings.token_type_embeddings.weight""", """vilt.embeddings.text_embeddings.token_type_embeddings.weight""", ), ("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""), ("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""), # patch embeddings ("""transformer.cls_token""", """vilt.embeddings.cls_token"""), ("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""), ("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""), ("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""), # token type embeddings ("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""), ] ) # final layernorm + pooler rename_keys.extend( [ ("""transformer.norm.weight""", """vilt.layernorm.weight"""), ("""transformer.norm.bias""", """vilt.layernorm.bias"""), ("""pooler.dense.weight""", """vilt.pooler.dense.weight"""), ("""pooler.dense.bias""", """vilt.pooler.dense.bias"""), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ("""vqa_classifier.0.weight""", """classifier.0.weight"""), 
("""vqa_classifier.0.bias""", """classifier.0.bias"""), ("""vqa_classifier.1.weight""", """classifier.1.weight"""), ("""vqa_classifier.1.bias""", """classifier.1.bias"""), ("""vqa_classifier.3.weight""", """classifier.3.weight"""), ("""vqa_classifier.3.bias""", """classifier.3.bias"""), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ("""nlvr2_classifier.0.weight""", """classifier.0.weight"""), ("""nlvr2_classifier.0.bias""", """classifier.0.bias"""), ("""nlvr2_classifier.1.weight""", """classifier.1.weight"""), ("""nlvr2_classifier.1.bias""", """classifier.1.bias"""), ("""nlvr2_classifier.3.weight""", """classifier.3.weight"""), ("""nlvr2_classifier.3.bias""", """classifier.3.bias"""), ] ) else: pass return rename_keys def __lowerCamelCase ( _lowercase , _lowercase ) -> str: for i in range(config.num_hidden_layers ): UpperCAmelCase : int = """vilt.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : List[Any] = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' ) UpperCAmelCase : Any = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : Any = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase : List[str] = in_proj_bias[: config.hidden_size] UpperCAmelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : List[Any] = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : str = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: UpperCAmelCase : Optional[int] = dct.pop(_lowercase ) UpperCAmelCase : str = val @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict: UpperCAmelCase : List[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=_lowercase ) UpperCAmelCase : str = False UpperCAmelCase : List[str] = False UpperCAmelCase : Tuple = False UpperCAmelCase : Optional[Any] = False if "vqa" in checkpoint_url: UpperCAmelCase : Tuple = True UpperCAmelCase : Union[str, Any] = 3_1_2_9 UpperCAmelCase : Optional[Any] = """huggingface/label-files""" UpperCAmelCase : Any = """vqa2-id2label.json""" UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Optional[int] = idalabel UpperCAmelCase : Any = {v: k for k, v in idalabel.items()} UpperCAmelCase : Tuple = ViltForQuestionAnswering(_lowercase ) elif "nlvr" in checkpoint_url: UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Dict = 2 UpperCAmelCase : List[Any] = {0: """False""", 1: """True"""} UpperCAmelCase : str = {v: k for k, v in config.idalabel.items()} UpperCAmelCase : Union[str, Any] = 3 UpperCAmelCase : int = ViltForImagesAndTextClassification(_lowercase ) elif "irtr" in checkpoint_url: UpperCAmelCase : int = True UpperCAmelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowercase ) elif "mlm_itm" in checkpoint_url: UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = ViltForMaskedLM(_lowercase ) else: raise ValueError("""Unknown model type""" ) 
# load state_dict of original model, remove and rename some keys UpperCAmelCase : int = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" )["""state_dict"""] UpperCAmelCase : Union[str, Any] = create_rename_keys(_lowercase , _lowercase , _lowercase , _lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) read_in_q_k_v(_lowercase , _lowercase ) if mlm_model or irtr_model: UpperCAmelCase : Optional[Any] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) # load state dict into HuggingFace model model.eval() if mlm_model: UpperCAmelCase , UpperCAmelCase : int = model.load_state_dict(_lowercase , strict=_lowercase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(_lowercase ) # Define processor UpperCAmelCase : Union[str, Any] = ViltImageProcessor(size=3_8_4 ) UpperCAmelCase : Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) UpperCAmelCase : Any = ViltProcessor(_lowercase , _lowercase ) # Forward pass on example inputs (image + text) if nlvr_model: UpperCAmelCase : Union[str, Any] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=_lowercase ).raw ) UpperCAmelCase : Optional[int] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=_lowercase ).raw ) UpperCAmelCase : str = ( """The left image contains twice the number of dogs as the right image, and at least two dogs in total are""" """ standing.""" ) UpperCAmelCase : str = processor(_lowercase , _lowercase , return_tensors="""pt""" ) UpperCAmelCase : str = processor(_lowercase , _lowercase , return_tensors="""pt""" ) UpperCAmelCase : int = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: UpperCAmelCase : Dict = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=_lowercase ).raw ) if mlm_model: UpperCAmelCase : Optional[int] = """a bunch of [MASK] laying on a [MASK].""" else: UpperCAmelCase : Any = """How many cats are there?""" UpperCAmelCase : Optional[int] = processor(_lowercase , _lowercase , return_tensors="""pt""" ) UpperCAmelCase : List[str] = model(**_lowercase ) # Verify outputs if mlm_model: UpperCAmelCase : Optional[int] = torch.Size([1, 1_1, 3_0_5_2_2] ) UpperCAmelCase : int = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , _lowercase , atol=1e-4 ) # verify masked token prediction equals "cats" UpperCAmelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: UpperCAmelCase : Optional[int] = torch.Size([1, 3_1_2_9] ) UpperCAmelCase : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , _lowercase , atol=1e-4 ) # verify vqa prediction equals "2" UpperCAmelCase : int = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: UpperCAmelCase : Optional[Any] = torch.Size([1, 2] ) UpperCAmelCase : Optional[Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(_lowercase 
).mkdir(exist_ok=_lowercase ) print(F'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) processor.save_pretrained(_lowercase ) if __name__ == "__main__": a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Tuple = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
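# --- Illustrative sketch (not part of the conversion script above) ---------------
# The conversion boils down to an (old_name, new_name) table plus a pop/re-insert
# helper. A toy, self-contained demonstration of that mechanism:
import torch

toy_state_dict = {"transformer.norm.weight": torch.ones(3)}
toy_rename_keys = [("transformer.norm.weight", "vilt.layernorm.weight")]

def toy_rename_key(dct, old, new):
    dct[new] = dct.pop(old)  # move the tensor under its HF-style key

for src, dest in toy_rename_keys:
    toy_rename_key(toy_state_dict, src, dest)
assert "vilt.layernorm.weight" in toy_state_dict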
265
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
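# --- Illustrative usage sketch (not part of the pipeline source above) -----------
# Minimal use of this pipeline with a CLIP checkpoint; the model name and image URL
# are assumptions for illustration.
from transformers import pipeline

clf = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
outputs = clf(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)
print(outputs[0])  # best-scoring {"score": ..., "label": ...} dict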
265
1
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
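# --- Illustrative usage sketch (not part of the test file above) -----------------
# The push/pull round trip these staging tests assert, as a plain script. The repo
# id and token are placeholders; pushing requires write access to the Hub.
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-user/test-image-processor", use_auth_token="hf_xxx")
reloaded = ViTImageProcessor.from_pretrained("my-user/test-image-processor")
assert processor.to_dict() == reloaded.to_dict()  # config survives the round trip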
265
'''simple docstring'''

from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    # Apply `fun` to `obj`, returning (result, None) on success or (None, exc) on failure.
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_are_added():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
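# --- Illustrative sketch (not part of the test file above) -----------------------
# The differential-testing idea distilled: run one operation against both
# containers and require identical (result, exception) behavior.
from operator import getitem, setitem

def run(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e

my = HashMap(initial_block_size=4)  # the structure under test, imported above
py = {}
assert run(my, setitem, "k", 1)[0] == run(py, setitem, "k", 1)[0]
assert run(my, getitem, "k")[0] == run(py, getitem, "k")[0] == 1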
265
1
'''simple docstring'''

from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc of the given angle (in degrees) and radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
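# --- Quick sanity check (illustrative, not part of the original file) ------------
# A 90-degree arc is a quarter circle, so its length is (2 * pi * r) / 4.
from math import isclose, pi

assert isclose(arc_length(90, 10), 2 * pi * 10 / 4)
print(arc_length(90, 10))  # ~15.708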
265
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
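# --- Illustrative usage sketch (not part of the test file above) -----------------
# End-to-end translation with the checkpoint exercised above (slow: downloads
# facebook/mbart-large-en-ro). The forced BOS token selects the target language,
# mirroring the forced_bos_token_id assertion in the last test.
from transformers import MBartForConditionalGeneration, MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt"
)
generated = model.generate(
    **batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"]
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])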
265
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
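# --- Illustrative note (not part of the module above) ----------------------------
# With the _LazyModule pattern, `import transformers` stays cheap: the lilt
# submodules above are resolved only on first attribute access, and callers are
# unaffected:
from transformers import LiltConfig  # resolved lazily via _LazyModule

config = LiltConfig()  # default configuration; the modeling classes also need torch
print(config.model_type, config.hidden_size)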
265
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
1
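For orientation, a minimal offline sketch of the save/load round-trip that the Hub tests above exercise end to end; the tiny checkpoint name is the one the tests already use, everything else here is illustrative:

import tempfile
from transformers import ViTImageProcessor

image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
with tempfile.TemporaryDirectory() as tmp_dir:
    image_processor.save_pretrained(tmp_dir)  # writes preprocessor_config.json
    reloaded = ViTImageProcessor.from_pretrained(tmp_dir)
# the attribute-by-attribute equality the tests assert after a Hub round-trip
for k, v in image_processor.__dict__.items():
    assert v == getattr(reloaded, k)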
"""Build a DIRECTORY.md-style table of contents for a source tree."""

import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
265
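A quick illustration of the markdown prefixes the table-of-contents script above emits (a sketch against the reconstructed helpers; the example path is made up):

print(repr(md_prefix(0)))  # '\n##'  -- top-level section heading
print(repr(md_prefix(2)))  # '    *' -- bullet nested two levels deep
# print_path("", "maths/series") prints "\n## Maths" followed by "  * Series"
# and returns "maths/series", which becomes the next old_path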
"""Smoke tests for accelerate's cross-process tensor operations."""

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    # each rank holds a distinct slice: rank r holds [r*n + 1, ..., r*n + n]
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
1
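The distributed-op checks above are meant to be launched once per rank, e.g. with `accelerate launch test_ops.py --num_processes 2` (the file name is hypothetical). A single-process sketch of the arithmetic in create_tensor:

import torch

# with num_processes=2: rank 0 holds [1., 2.] and rank 1 holds [3., 4.], so
# gather(...) yields [1., 2., 3., 4.] on every rank and reduce(..., "sum") yields [4., 6.]
num_processes, process_index = 2, 1
tensor = torch.arange(num_processes) + 1.0 + num_processes * process_index
print(tensor)  # tensor([3., 4.])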
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
265
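For context, the smallest end-to-end use of the pipeline under test; the tiny model name is taken from the tests above, and scores from a tiny random model are close to uniform:

from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# labels come back sorted by descending score; without multi_label=True the
# scores are normalized to sum to ~1.0, which the tests above assert
print(result["labels"], result["scores"])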
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
1
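The guidance_scale step in the pipeline above is plain arithmetic on the two U-Net passes; a toy sketch with stand-in tensors:

import torch

guidance_scale = 7.5
noise_pred_uncond = torch.zeros(1, 4, 8, 8)  # stand-in for the unconditional pass
noise_pred_text = torch.ones(1, 4, 8, 8)     # stand-in for the text-conditioned pass
# classifier-free guidance: push the prediction away from the unconditional one
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert noise_pred.abs().max().item() == 7.5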
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
265
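The renaming in create_rename_keys above is a chain of string substitutions; an illustrative key (not from a real checkpoint) traced through the block-index branch:

k = "network.0.1.attn.proj.weight"
ls = k.split(".")
assert ls[2].isdigit()  # the third segment indexes a block within stage ls[1]
renamed = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
print(renamed)  # swiftformer.encoder.network.0.blocks.1.attn.proj.weight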
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def __lowerCamelCase ( _lowercase ) -> List[str]: if "cls_token" in name: UpperCAmelCase : int = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: UpperCAmelCase : List[str] = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: UpperCAmelCase : Any = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: UpperCAmelCase : Dict = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: UpperCAmelCase : Tuple = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: UpperCAmelCase : List[str] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: UpperCAmelCase : List[Any] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: UpperCAmelCase : str = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase : Any = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase : Optional[int] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase : int = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: UpperCAmelCase : int = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: UpperCAmelCase : List[str] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: UpperCAmelCase : Optional[Any] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: UpperCAmelCase : Dict = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: UpperCAmelCase : List[Any] = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]: for key in orig_state_dict.copy().keys(): UpperCAmelCase : List[str] = orig_state_dict.pop(_lowercase ) if "qkv" in key: UpperCAmelCase : int = key.split(""".""" ) UpperCAmelCase : Tuple = int(key_split[1] ) if "decoder_blocks" in key: UpperCAmelCase : List[Any] = config.decoder_hidden_size UpperCAmelCase : Union[str, Any] = """decoder.decoder_layers.""" if "weight" in key: UpperCAmelCase : Tuple = val[:dim, :] UpperCAmelCase : int = val[dim : dim * 2, :] UpperCAmelCase : Tuple = val[-dim:, :] elif "bias" in key: UpperCAmelCase : str = val[:dim] UpperCAmelCase : str = val[dim : dim * 2] UpperCAmelCase : str = val[-dim:] else: UpperCAmelCase : Dict = config.hidden_size UpperCAmelCase : int = """vit.encoder.layer.""" if "weight" in key: UpperCAmelCase : List[str] = val[:dim, :] UpperCAmelCase : List[str] = val[dim : dim * 2, :] UpperCAmelCase : int = val[-dim:, :] elif "bias" in key: UpperCAmelCase : Any = val[:dim] UpperCAmelCase : Optional[int] = val[dim : dim * 2] UpperCAmelCase : Optional[int] = 
val[-dim:] else: UpperCAmelCase : Tuple = val return orig_state_dict def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict: UpperCAmelCase : int = ViTMAEConfig() if "large" in checkpoint_url: UpperCAmelCase : List[str] = 1_0_2_4 UpperCAmelCase : Tuple = 4_0_9_6 UpperCAmelCase : Tuple = 2_4 UpperCAmelCase : Dict = 1_6 elif "huge" in checkpoint_url: UpperCAmelCase : int = 1_4 UpperCAmelCase : Optional[Any] = 1_2_8_0 UpperCAmelCase : Tuple = 5_1_2_0 UpperCAmelCase : Optional[Any] = 3_2 UpperCAmelCase : List[str] = 1_6 UpperCAmelCase : List[str] = ViTMAEForPreTraining(_lowercase ) UpperCAmelCase : List[str] = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" )["""model"""] UpperCAmelCase : Dict = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase : Optional[Any] = convert_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase ) model.eval() UpperCAmelCase : Tuple = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" UpperCAmelCase : str = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) UpperCAmelCase : List[str] = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase : Any = image_processor(images=_lowercase , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) UpperCAmelCase : List[Any] = model(**_lowercase ) UpperCAmelCase : int = outputs.logits if "large" in checkpoint_url: UpperCAmelCase : List[str] = torch.tensor( [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] ) elif "huge" in checkpoint_url: UpperCAmelCase : Optional[int] = torch.tensor( [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] ) else: UpperCAmelCase : List[str] = torch.tensor( [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowercase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowercase ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : List[str] = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
265
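The qkv handling in convert_state_dict above cuts one fused projection into three equal query/key/value blocks; a self-contained sketch of that slicing:

import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # fused query/key/value projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)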
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
1
'''simple docstring''' import argparse import os import re a : Optional[Any] = """src/transformers""" # Pattern that looks at the indentation in a line. a : Any = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. a : Optional[int] = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. a : Union[str, Any] = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. a : int = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. a : str = re.compile(R"""\[([^\]]+)\]""") def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : List[str] = _re_indent.search(_lowercase ) return "" if search is None else search.groups()[0] def __lowerCamelCase ( _lowercase , _lowercase="" , _lowercase=None , _lowercase=None ) -> Union[str, Any]: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(_lowercase ): index += 1 UpperCAmelCase : List[str] = ["""\n""".join(lines[:index] )] else: UpperCAmelCase : Optional[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). UpperCAmelCase : Tuple = [lines[index]] index += 1 while index < len(_lowercase ) and (end_prompt is None or not lines[index].startswith(_lowercase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_lowercase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(_lowercase ) ) if index < len(_lowercase ) - 1: UpperCAmelCase : Tuple = [lines[index + 1]] index += 1 else: UpperCAmelCase : Optional[Any] = [] else: blocks.append("""\n""".join(_lowercase ) ) UpperCAmelCase : int = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_lowercase ) > 0: blocks.append("""\n""".join(_lowercase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowercase ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def __lowerCamelCase ( _lowercase ) -> Dict: def _inner(_lowercase ): return key(_lowercase ).lower().replace("""_""" , """""" ) return _inner def __lowerCamelCase ( _lowercase , _lowercase=None ) -> int: # If no key is provided, we use a noop. def noop(_lowercase ): return x if key is None: UpperCAmelCase : List[str] = noop # Constants are all uppercase, they go first. UpperCAmelCase : Any = [obj for obj in objects if key(_lowercase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. UpperCAmelCase : Tuple = [obj for obj in objects if key(_lowercase )[0].isupper() and not key(_lowercase ).isupper()] # Functions begin with a lowercase, they go last. UpperCAmelCase : List[Any] = [obj for obj in objects if not key(_lowercase )[0].isupper()] UpperCAmelCase : Dict = ignore_underscore(_lowercase ) return sorted(_lowercase , key=_lowercase ) + sorted(_lowercase , key=_lowercase ) + sorted(_lowercase , key=_lowercase ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: # This inner function sort imports between [ ]. 
def _replace(_lowercase ): UpperCAmelCase : str = match.groups()[0] if "," not in imports: return F'''[{imports}]''' UpperCAmelCase : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase : List[Any] = keys[:-1] return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(_lowercase )] ) + "]" UpperCAmelCase : str = import_statement.split("""\n""" ) if len(_lowercase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. UpperCAmelCase : Optional[Any] = 2 if lines[1].strip() == """[""" else 1 UpperCAmelCase : List[Any] = [(i, _re_strip_line.search(_lowercase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] UpperCAmelCase : Tuple = sort_objects(_lowercase , key=lambda _lowercase : x[1] ) UpperCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_lowercase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: UpperCAmelCase : int = _re_bracket_content.sub(_replace , lines[1] ) else: UpperCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase : Union[str, Any] = keys[:-1] UpperCAmelCase : str = get_indent(lines[1] ) + """, """.join([F'''"{k}"''' for k in sort_objects(_lowercase )] ) return "\n".join(_lowercase ) else: # Finally we have to deal with imports fitting on one line UpperCAmelCase : int = _re_bracket_content.sub(_replace , _lowercase ) return import_statement def __lowerCamelCase ( _lowercase , _lowercase=True ) -> Optional[int]: with open(_lowercase , encoding="""utf-8""" ) as f: UpperCAmelCase : Optional[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 UpperCAmelCase : Union[str, Any] = split_code_in_indented_blocks( _lowercase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowercase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. UpperCAmelCase : int = main_blocks[block_idx] UpperCAmelCase : Any = block.split("""\n""" ) # Get to the start of the imports. UpperCAmelCase : List[str] = 0 while line_idx < len(_lowercase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: UpperCAmelCase : int = len(_lowercase ) else: line_idx += 1 if line_idx >= len(_lowercase ): continue # Ignore beginning and last line: they don't contain anything. UpperCAmelCase : str = """\n""".join(block_lines[line_idx:-1] ) UpperCAmelCase : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. 
UpperCAmelCase : Any = split_code_in_indented_blocks(_lowercase , indent_level=_lowercase ) # We have two categories of import key: list or _import_structure[key].append/extend UpperCAmelCase : Optional[Any] = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. UpperCAmelCase : str = [(pattern.search(_lowercase ).groups()[0] if pattern.search(_lowercase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. UpperCAmelCase : List[Any] = [(i, key) for i, key in enumerate(_lowercase ) if key is not None] UpperCAmelCase : Dict = [x[0] for x in sorted(_lowercase , key=lambda _lowercase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. UpperCAmelCase : List[str] = 0 UpperCAmelCase : Optional[int] = [] for i in range(len(_lowercase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: UpperCAmelCase : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_lowercase ) count += 1 # And we put our main block back together with its first and last line. UpperCAmelCase : Optional[Any] = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_lowercase ): if check_only: return True else: print(F'''Overwriting {file}.''' ) with open(_lowercase , """w""" , encoding="""utf-8""" ) as f: f.write("""\n""".join(_lowercase ) ) def __lowerCamelCase ( _lowercase=True ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = [] for root, _, files in os.walk(_lowercase ): if "__init__.py" in files: UpperCAmelCase : Dict = sort_imports(os.path.join(_lowercase , """__init__.py""" ) , check_only=_lowercase ) if result: UpperCAmelCase : str = [os.path.join(_lowercase , """__init__.py""" )] if len(_lowercase ) > 0: raise ValueError(F'''Would overwrite {len(_lowercase )} files, run `make style`.''' ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") a : Tuple = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
265
"""Count the ways to fill a row with blocks of a minimum length (each pair of
blocks separated by at least one empty cell) and return the smallest row
length whose fill count first exceeds one million (Project Euler 115)."""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    # fill_count_functions[n] is the number of valid fills of a length-n row;
    # rows shorter than the minimum block length admit only the empty fill.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # the empty (all-grey) row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1  # block ending flush at the right edge
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
265
1
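The dynamic programme in the row above can be spot-checked by brute force on small rows. The sketch below is mine (the helper name and the trailing assert are not part of the dataset); it enumerates 0/1 rows and keeps those whose runs of filled cells all meet the minimum block length, reproducing Project Euler 114's worked example of 17 fills for a 7-unit row with blocks of length at least 3.

from itertools import product


def brute_force_fill_count(n: int, min_block_length: int) -> int:
    """Count length-n 0/1 rows in which every run of 1s is at least
    min_block_length long (runs are separated by 0s by construction)."""
    total = 0
    for row in product((0, 1), repeat=n):
        run, ok = 0, True
        for cell in (*row, 0):  # a trailing 0 flushes the final run
            if cell:
                run += 1
            else:
                ok = ok and (run == 0 or run >= min_block_length)
                run = 0
        total += ok
    return total


assert brute_force_fill_count(7, 3) == 17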
"""Doomsday algorithm: name the weekday of a given Gregorian date."""

DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Weekday on which all of this year's doomsday dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Doomsday date of the requested month; century years are leap years
    # only when divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
265
'''simple docstring''' from __future__ import annotations import math class UpperCamelCase_ : def __init__( self , A ) -> None: UpperCAmelCase : Optional[int] = size # approximate the overall size of segment tree with given value UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )] UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def _lowercase( self , A ) -> int: return idx * 2 def _lowercase( self , A ) -> int: return idx * 2 + 1 def _lowercase( self , A , A , A , A ) -> None: if left_element == right_element: UpperCAmelCase : str = a[left_element - 1] else: UpperCAmelCase : Tuple = (left_element + right_element) // 2 self.build(self.left(A ) , A , A , A ) self.build(self.right(A ) , mid + 1 , A , A ) UpperCAmelCase : str = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) def _lowercase( self , A , A , A , A , A , A ) -> bool: if self.flag[idx] is True: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : int = False if left_element != right_element: UpperCAmelCase : List[str] = self.lazy[idx] UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : int = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: UpperCAmelCase : Optional[Any] = val if left_element != right_element: UpperCAmelCase : Tuple = val UpperCAmelCase : int = val UpperCAmelCase : Any = True UpperCAmelCase : str = True return True UpperCAmelCase : str = (left_element + right_element) // 2 self.update(self.left(A ) , A , A , A , A , A ) self.update(self.right(A ) , mid + 1 , A , A , A , A ) UpperCAmelCase : List[str] = max( self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] ) return True def _lowercase( self , A , A , A , A , A ) -> int | float: if self.flag[idx] is True: UpperCAmelCase : Any = self.lazy[idx] UpperCAmelCase : Any = False if left_element != right_element: UpperCAmelCase : Optional[Any] = self.lazy[idx] UpperCAmelCase : Tuple = self.lazy[idx] UpperCAmelCase : List[str] = True UpperCAmelCase : Tuple = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] UpperCAmelCase : Dict = (left_element + right_element) // 2 UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A ) UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A ) return max(A , A ) def __str__( self ) -> str: return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] a : Optional[Any] = 1_5 a : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
265
1
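The Doomsday routine at the top of this row is straightforward to validate against the standard library. A minimal cross-check sketch, assuming the Sunday-first weekday numbering used there (the function name and the test dates are mine):

import datetime

WEEK_DAY_NAMES = ["Sunday", "Monday", "Tuesday", "Wednesday",
                  "Thursday", "Friday", "Saturday"]


def reference_week_day(year: int, month: int, day: int) -> str:
    # datetime.date.weekday() numbers Monday as 0, so shift into the
    # Sunday-first numbering the Doomsday snippet uses.
    return WEEK_DAY_NAMES[(datetime.date(year, month, day).weekday() + 1) % 7]


assert reference_week_day(2020, 10, 24) == "Saturday"
assert reference_week_day(2017, 10, 24) == "Tuesday"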
from ....utils import logging

logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration that wraps a text encoder's config and adds the extra
    fields a multimodal bitransformer needs."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Adopt every attribute of the wrapped transformer config wholesale.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
265
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL image by `level`."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
265
1
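Since 128 + level + (c - 128) reduces to c + level, the brightness filter above collapses to a one-liner. A behavior-equivalent sketch (the function name is mine; it relies on PIL's point() clamping 8-bit band values into 0..255):

from PIL import Image


def change_brightness_oneliner(img: Image, level: float) -> Image:
    # Shift every band value by `level`; PIL clamps the result to 0..255.
    return img.point(lambda c: c + level)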
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin a : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = BartphoTokenizer lowercase = False lowercase = True def _lowercase( self ) -> Any: super().setUp() UpperCAmelCase : Any = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] UpperCAmelCase : str = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Optional[Any] = {"""unk_token""": """<unk>"""} UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) UpperCAmelCase : List[Any] = BartphoTokenizer(A , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , **A ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> List[Any]: UpperCAmelCase : Optional[int] = """This is a là test""" UpperCAmelCase : str = """This is a<unk><unk> test""" return input_text, output_text def _lowercase( self ) -> List[str]: UpperCAmelCase : Any = BartphoTokenizer(A , self.monolingual_vocab_file , **self.special_tokens_map ) UpperCAmelCase : Union[str, Any] = """This is a là test""" UpperCAmelCase : Any = """▁This ▁is ▁a ▁l à ▁t est""".split() UpperCAmelCase : str = tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[int] = tokens + [tokenizer.unk_token] UpperCAmelCase : int = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
265
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
1
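The parameterized test in this row exercises GPT-NeoX's rotary-embedding scaling by writing a rope_scaling dict onto the config before building the second model. A minimal sketch of that setup, assuming the same post-init assignment works outside the test harness (the tiny sizes are mine, chosen so the model builds quickly):

from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    hidden_size=64, num_attention_heads=8, num_hidden_layers=2, intermediate_size=128
)
# "linear" and "dynamic" are the two strategies the test parametrizes over.
config.rope_scaling = {"type": "linear", "factor": 10.0}
model = GPTNeoXModel(config)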
'''simple docstring''' import os import time import numpy as np import onnxruntime as ort a : List[Any] = """1""" a : Dict = """0""" a : List[str] = """1""" a : Optional[int] = ort.SessionOptions() a : Optional[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("""Create inference session...""") a : Union[str, Any] = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""] a : List[str] = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider) a : Optional[Any] = ort.RunOptions() a : Tuple = 1_2_8 a : List[Any] = 1 a : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa) a : Any = np.ones((batch, sequence), dtype=np.intaa) a : List[Any] = np.ones((batch, sequence), dtype=np.intaa) print("""Warm up phase...""") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Start inference...""") a : str = time.time() a : int = 2_0_0_0 a : Union[str, Any] = {} for iter in range(max_iters): a : int = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
265
def actual_power(a: int, b: int) -> int:
    """Raise `a` to the power abs(b) by recursive squaring (b may be negative)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so the recursion above also
        # terminates for negative b and yields a ** abs(b).
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
265
1
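One thing the single call at the end of the previous snippet does not surface: actual_power recomputes the half power twice per level, so it performs O(b) multiplications rather than O(log b). A sketch (the name fast_power is mine) that binds the half power once:

def fast_power(a: float, b: int) -> float:
    """Exponentiation by squaring in O(log abs(b)) multiplications."""
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half


assert fast_power(-2, -3) == -0.125
assert fast_power(3, 5) == 243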
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = ProphetNetTokenizer lowercase = False def _lowercase( self ) -> Union[str, Any]: super().setUp() UpperCAmelCase : Any = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase( self , A ) -> Optional[Any]: UpperCAmelCase : Optional[int] = """UNwant\u00E9d,running""" UpperCAmelCase : str = """unwanted, running""" return input_text, output_text def _lowercase( self ) -> Any: UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file ) UpperCAmelCase : Dict = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [9, 6, 7, 12, 10, 11] ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : int = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _lowercase( self ) -> Any: UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=A ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=A , strip_accents=A ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = BasicTokenizer(do_lower_case=A , strip_accents=A ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _lowercase( self ) -> Any: UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=A ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Any = BasicTokenizer(do_lower_case=A ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _lowercase( self ) -> str: UpperCAmelCase : str = BasicTokenizer(do_lower_case=A , strip_accents=A ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=A , strip_accents=A ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=A , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] UpperCAmelCase : List[Any] = {} for i, token in enumerate(A ): UpperCAmelCase : Tuple = i UpperCAmelCase : Dict = WordpieceTokenizer(vocab=A , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) @require_torch def _lowercase( self ) -> Optional[int]: UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) UpperCAmelCase : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] UpperCAmelCase : Any = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] UpperCAmelCase : Dict = tokenizer(A , padding=A , return_tensors="""pt""" ) self.assertIsInstance(A , A ) UpperCAmelCase : Dict = list(batch.input_ids.numpy()[0] ) self.assertListEqual(A , A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def _lowercase( self ) -> Tuple: self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _lowercase( self ) -> Tuple: self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _lowercase( self ) -> Any: self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) UpperCAmelCase : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
265
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
1
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
265
1
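Stripped of its test scaffolding, the slow DiffEdit test above reduces to three pipeline calls: derive a mask from the source and target prompts, invert the image into latents, then inpaint the masked region. A condensed sketch of that flow (the model id, image URL, prompts, and strengths are copied from the test; treating the scheduler swaps as plain attribute assignments is my reading of the flattened code, and running this needs a GPU plus a model download):

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

mask = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
latents = pipe.invert(
    prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7
).latents
image = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
).images[0]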