code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __snake_case ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase__ ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase__ ( self : str ): __snake_case: Any = 1 __snake_case: Dict = 3 __snake_case: List[str] = (32, 32) __snake_case: Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) return image @property def UpperCAmelCase__ ( self : Tuple ): torch.manual_seed(0 ) __snake_case: int = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def UpperCAmelCase__ ( self : List[Any] ): torch.manual_seed(0 ) __snake_case: Optional[Any] = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def UpperCAmelCase__ ( self : str ): torch.manual_seed(0 ) __snake_case: Any = CLIPTextConfig( 
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) return CLIPTextModel(lowerCAmelCase__ ) def UpperCAmelCase__ ( self : Tuple ): __snake_case: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case: Union[str, Any] = self.dummy_cond_unet_upscale __snake_case: Union[str, Any] = DDPMScheduler() __snake_case: Optional[Any] = DDIMScheduler(prediction_type="""v_prediction""" ) __snake_case: List[Any] = self.dummy_vae __snake_case: List[Any] = self.dummy_text_encoder __snake_case: Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __snake_case: int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case: Dict = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __snake_case: Optional[Any] = StableDiffusionUpscalePipeline( unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , ) __snake_case: Any = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case: str = "A painting of a squirrel eating a burger" __snake_case: Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) __snake_case: Optional[int] = sd_pipe( [prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) __snake_case: Any = output.images __snake_case: str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) __snake_case: Any = sd_pipe( [prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , 
output_type="""np""" , return_dict=lowerCAmelCase__ , )[0] __snake_case: str = image[0, -3:, -3:, -1] __snake_case: Tuple = image_from_tuple[0, -3:, -3:, -1] __snake_case: str = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) __snake_case: Optional[int] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case: Optional[Any] = self.dummy_cond_unet_upscale __snake_case: str = DDPMScheduler() __snake_case: Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" ) __snake_case: Optional[int] = self.dummy_vae __snake_case: Any = self.dummy_text_encoder __snake_case: Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __snake_case: Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case: Any = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __snake_case: List[Any] = StableDiffusionUpscalePipeline( unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , ) __snake_case: Any = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case: Dict = "A painting of a squirrel eating a burger" __snake_case: int = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) __snake_case: str = output.images assert image.shape[0] == 2 __snake_case: Tuple = torch.Generator(device=lowerCAmelCase__ 
).manual_seed(0 ) __snake_case: int = sd_pipe( [prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) __snake_case: Optional[int] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: str = self.dummy_cond_unet_upscale __snake_case: Tuple = DDPMScheduler() __snake_case: Dict = DDIMScheduler(prediction_type="""v_prediction""" ) __snake_case: Optional[int] = self.dummy_vae __snake_case: str = self.dummy_text_encoder __snake_case: Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __snake_case: Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case: int = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 __snake_case: Any = unet.half() __snake_case: int = text_encoder.half() # make sure here that pndm scheduler skips prk __snake_case: List[Any] = StableDiffusionUpscalePipeline( unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , ) __snake_case: int = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __snake_case: Dict = "A painting of a squirrel eating a burger" __snake_case: str = torch.manual_seed(0 ) __snake_case: Optional[Any] = sd_pipe( [prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" , ).images __snake_case: List[str] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase__ ( 
self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Tuple ): __snake_case: List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) __snake_case: Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) __snake_case: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler" __snake_case: str = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __snake_case: Union[str, Any] = "a cat sitting on a park bench" __snake_case: Optional[Any] = torch.manual_seed(0 ) __snake_case: List[Any] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="""np""" , ) __snake_case: int = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def UpperCAmelCase__ ( self : List[str] ): __snake_case: Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) __snake_case: int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) __snake_case: Union[str, Any] = "stabilityai/stable-diffusion-x4-upscaler" __snake_case: List[str] = StableDiffusionUpscalePipeline.from_pretrained( lowerCAmelCase__ , torch_dtype=torch.floataa , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __snake_case: Any = "a cat sitting on a park bench" __snake_case: Any = torch.manual_seed(0 ) __snake_case: Any = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , 
output_type="""np""" , ) __snake_case: Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def UpperCAmelCase__ ( self : List[str] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __snake_case: List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) __snake_case: int = "stabilityai/stable-diffusion-x4-upscaler" __snake_case: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained( lowerCAmelCase__ , torch_dtype=torch.floataa , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __snake_case: str = "a cat sitting on a park bench" __snake_case: Union[str, Any] = torch.manual_seed(0 ) __snake_case: Optional[int] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="""np""" , ) __snake_case: Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
111
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. 
# New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase : Any = 16 UpperCAmelCase : str = 32 def _A ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 ): """simple docstring""" a__ : int =AutoTokenizer.from_pretrained("bert-base-cased" ) a__ : List[str] =load_dataset("glue" , "mrpc" ) def tokenize_function(SCREAMING_SNAKE_CASE : List[Any] ): # max_length=None => use the model max length (it's actually the default) a__ : int =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a__ : Dict =datasets.map( SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a__ : Dict =tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(SCREAMING_SNAKE_CASE : str ): # On TPU it's best to pad everything to the same length or training will be very slow. 
a__ : Optional[Any] =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a__ : str =16 elif accelerator.mixed_precision != "no": a__ : Union[str, Any] =8 else: a__ : List[str] =None return tokenizer.pad( SCREAMING_SNAKE_CASE , padding="longest" , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors="pt" , ) # Instantiate dataloaders. a__ : Any =DataLoader( tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) a__ : int =DataLoader( tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase : str = mocked_dataloaders # noqa: F811 def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE ) == "1": a__ : Tuple =2 # Initialize accelerator a__ : int =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a__ : Optional[int] =config["lr"] a__ : Union[str, Any] =int(config["num_epochs"] ) a__ : Any =int(config["seed"] ) a__ : Dict =int(config["batch_size"] ) a__ : int =evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation a__ : int =1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: a__ : Dict =batch_size // MAX_GPU_BATCH_SIZE a__ : Tuple =MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE ) a__ , a__ : Optional[int] =get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Instantiate the 
model (we build the model here so that the seed also control new weights initialization) a__ : List[str] =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a__ : List[str] =model.to(accelerator.device ) # Instantiate optimizer a__ : List[Any] =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE ) # Instantiate scheduler a__ : Optional[int] =get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a__ , a__ , a__ , a__ , a__ : Optional[int] =accelerator.prepare( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a__ : Dict =model(**SCREAMING_SNAKE_CASE ) a__ : List[Any] =outputs.loss a__ : List[str] =loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() a__ : Optional[Any] =0 for step, batch in enumerate(SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a__ : Any =model(**SCREAMING_SNAKE_CASE ) a__ : str =outputs.logits.argmax(dim=-1 ) a__ , a__ : List[str] =accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(SCREAMING_SNAKE_CASE ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples a__ : Optional[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen] a__ : Dict =references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , ) a__ : Tuple =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE ) def _A ( ): """simple docstring""" a__ : List[str] =argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) a__ : str =parser.parse_args() a__ : Optional[int] ={"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
95
0
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int: '''simple docstring''' return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str: '''simple docstring''' lowerCAmelCase : Any = 0 lowerCAmelCase : List[Any] = number while duplicate > 0: lowerCAmelCase : Dict = divmod(_UpperCAmelCase, 10 ) fact_sum += factorial(_UpperCAmelCase ) return fact_sum == number if __name__ == "__main__": print('''Program to check whether a number is a Krisnamurthy Number or not.''') __A : Optional[Any] = int(input('''Enter number: ''').strip()) print( F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.' )
138
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __lowerCAmelCase ( unittest.TestCase): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Optional[Any]: '''simple docstring''' a__ : Union[str, Any] =size if size is not None else {"shortest_edge": 2_0} a__ : List[str] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8} a__ : Tuple =parent a__ : Union[str, Any] =batch_size a__ : List[str] =num_channels a__ : List[Any] =image_size a__ : str =min_resolution a__ : Optional[int] =max_resolution a__ : Tuple =do_resize a__ : Union[str, Any] =size a__ : List[Any] =do_center_crop a__ : List[str] =crop_size a__ : Optional[int] =do_flip_channel_order def _lowercase ( self ) -> Optional[int]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase): _lowercase : int = MobileViTImageProcessor if is_vision_available() else None def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ : Tuple =MobileViTImageProcessingTester(self ) @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ) -> List[str]: '''simple docstring''' a__ : str 
=self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 2_0} ) self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} ) a__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2} ) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' pass def _lowercase ( self ) -> Tuple: '''simple docstring''' a__ : List[str] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : List[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _lowercase ( self ) -> Any: '''simple 
docstring''' a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : int =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ : int =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : List[str] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
95
0
"""simple docstring""" from __future__ import annotations def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ : Optional[Any] = 2 A_ : Dict = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(_UpperCAmelCase ) if n > 1: factors.append(_UpperCAmelCase ) return factors if __name__ == "__main__": import doctest doctest.testmod()
167
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCAmelCase ( unittest.TestCase): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]: '''simple docstring''' a__ : str =size if size is not None else {"shortest_edge": 2_0} a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8} a__ : Tuple =parent a__ : Optional[int] =batch_size a__ : Any =num_channels a__ : List[str] =image_size a__ : Dict =min_resolution a__ : List[Any] =max_resolution a__ : Dict =do_resize a__ : Union[str, Any] =size a__ : str =do_center_crop a__ : List[str] =crop_size def _lowercase ( self ) -> str: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase): _lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None def _lowercase ( self ) -> Tuple: '''simple docstring''' a__ : Optional[int] =MobileNetVaImageProcessingTester(self ) @property def _lowercase ( self ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self ) -> Any: '''simple docstring''' a__ : List[str] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) 
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) ) def _lowercase ( self ) -> str: '''simple docstring''' a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 2_0} ) self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} ) a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2} ) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} ) def _lowercase ( self ) -> Any: '''simple docstring''' pass def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : Dict =self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _lowercase ( self ) -> int: '''simple docstring''' a__ : str =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , 
numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : Any =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
95
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import scaffolding for the X-MOD model (config + PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule below.
# (The obfuscated original assigned this to a throwaway name and then passed
# the undefined `_import_structure` to _LazyModule — a NameError at import.)
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only advertise the PyTorch classes when torch is installed.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on
    # first attribute access (the HF lazy-module pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
262
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Any = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
95
0
"""Demonstrate affine rotations of a grayscale image with OpenCV."""
from pathlib import Path

import cva
import numpy as np
from matplotlib import pyplot as plt


def _a(img, src_points, dst_points, rows, cols):
    """Warp *img* with the affine map taking ``src_points`` -> ``dst_points``.

    The obfuscated original gave all five parameters the same name (a
    SyntaxError) and discarded the computed matrix; restored here.
    Returns the warped image of size (cols, rows).
    """
    # NOTE(review): `cva` is presumably an alias for cv2 (OpenCV) — confirm.
    matrix = cva.getAffineTransform(src_points, dst_points)
    return cva.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray-scale values
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different control points to rotate the image
    # (`np.floataa` does not exist — the intended dtype is float32)
    ptsa = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    ptsa_dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    ptsb = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    ptsb_dst = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # collect the original plus three rotated variants
    # NOTE(review): the source/destination pairings below are reconstructed —
    # the obfuscated call sites all reused one name; confirm the pairing.
    images = [
        gray_img,
        _a(gray_img, ptsa, ptsa_dst, img_rows, img_cols),
        _a(gray_img, ptsb, ptsa_dst, img_rows, img_cols),
        _a(gray_img, ptsb, ptsb_dst, img_rows, img_cols),
    ]
    # plot the different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, rotated in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(rotated, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
92
"""Swin2SR model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> config-file URL map.
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a Swin2SR image super-resolution model.

    Defaults reproduce the ``caidas/swin2sr-classicalsr-x2-64`` checkpoint.
    The obfuscated original declared duplicate parameter names (a SyntaxError)
    and bound every hyper-parameter to one local instead of ``self``; restored.
    """

    # Names the PretrainedConfig machinery looks up; the collapsed
    # `_lowercase` duplicates shadowed each other and never reached the base.
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],  # noqa: B006 — HF configs use list defaults
        num_heads=[6, 6, 6, 6, 6, 6],  # noqa: B006
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Derived: one transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
95
0
"""Longest palindromic substring via Manacher's algorithm."""

__all__ = ["__UpperCAmelCase"]


def __UpperCAmelCase(a_):
    """Return the longest palindromic substring of *a_* in O(len(a_)) time.

    The obfuscated original collapsed ``l``, ``r``, ``length``, ``start`` and
    ``max_length`` into one name and sized the scan by the raw string instead
    of the '|'-interleaved one; restored here. Empty input returns "".

    >>> __UpperCAmelCase("abbbaba")
    'abbba'
    """
    if not a_:
        return ""

    # "aba" -> "a|b|a": interleave '|' so even- and odd-length palindromes
    # share one center representation.
    new_input_string = ""
    for ch in a_[: len(a_) - 1]:
        new_input_string += ch + "|"
    new_input_string += a_[-1]

    # [l, r] bounds the previously found palindrome reaching furthest right.
    l, r = 0, 0  # noqa: E741
    # length[j] = palindrome length (in new_input_string) centered at j.
    length = [1 for _ in range(len(new_input_string))]
    max_length = 0
    start = 0

    for j in range(len(new_input_string)):
        # Mirror trick: reuse the already-computed palindrome at the mirror
        # position l + r - j, capped at the right boundary.
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1

        # Does this palindrome end past the previously explored end r?
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        if max_length < length[j]:
            max_length = length[j]
            start = j

    # Strip the '|' separators from the winning interleaved span.
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    output_string = ""
    for ch in s:
        if ch != "|":
            output_string += ch
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
178
from diffusers.utils.testing_utils import require_onnxruntime


# NOTE(review): empty placeholder — presumably a container for ONNX-runtime
# pipeline tests yet to be written; the decorator skips the whole class when
# onnxruntime is not installed. Confirm before deleting.
@require_onnxruntime
class __lowerCAmelCase:
    """Placeholder for onnxruntime-gated tests (none defined yet)."""

    pass
95
0
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def A_ ( _lowerCAmelCase : int = 3 ): """simple docstring""" if isinstance(_lowerCAmelCase, _lowerCAmelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_lowerCAmelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _a = QuantumRegister(_lowerCAmelCase, '''qr''' ) _a = ClassicalRegister(_lowerCAmelCase, '''cr''' ) _a = QuantumCircuit(_lowerCAmelCase, _lowerCAmelCase ) _a = number_of_qubits for i in range(_lowerCAmelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_lowerCAmelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j), _lowerCAmelCase, _lowerCAmelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_lowerCAmelCase, number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_lowerCAmelCase, _lowerCAmelCase ) # simulate with 10000 shots _a = Aer.get_backend('''qasm_simulator''' ) _a = execute(_lowerCAmelCase, _lowerCAmelCase, shots=1_00_00 ) return job.result().get_counts(_lowerCAmelCase ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}' )
320
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class __lowerCAmelCase(PipelineTool):
    """Agent tool that summarizes English text with a BART-SamSum checkpoint.

    The obfuscated original collapsed the PipelineTool hook names
    (encode/forward/decode and the class-level config attributes) into
    shadowing `_lowercase` duplicates, and passed the data argument where
    boolean flags (`truncation`, `skip_special_tokens`, ...) belong; restored.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's max length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded batch."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Turn generated ids back into clean, human-readable text."""
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
95
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE :List[str] = { """configuration_layoutlmv3""": [ """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv3Config""", """LayoutLMv3OnnxConfig""", ], """processing_layoutlmv3""": ["""LayoutLMv3Processor"""], """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[int] = ["""LayoutLMv3TokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :List[Any] = [ """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv3ForQuestionAnswering""", """LayoutLMv3ForSequenceClassification""", """LayoutLMv3ForTokenClassification""", """LayoutLMv3Model""", """LayoutLMv3PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Dict = [ """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLayoutLMv3ForQuestionAnswering""", """TFLayoutLMv3ForSequenceClassification""", """TFLayoutLMv3ForTokenClassification""", """TFLayoutLMv3Model""", """TFLayoutLMv3PreTrainedModel""", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[int] = ["""LayoutLMv3FeatureExtractor"""] SCREAMING_SNAKE_CASE :Optional[int] = ["""LayoutLMv3ImageProcessor"""] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not 
is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
15
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase : List[Any] = [ """small""", """small-base""", """medium""", """medium-base""", """intermediate""", """intermediate-base""", """large""", """large-base""", """xlarge""", """xlarge-base""", ] UpperCAmelCase : Optional[int] = { """vocab_file""": { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""", """funnel-transformer/medium-base""": ( """https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt""" ), """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""", """funnel-transformer/xlarge-base""": ( """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""", 
"""funnel-transformer/small-base""": ( """https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json""" ), """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""", """funnel-transformer/medium-base""": ( """https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json""" ), """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""", """funnel-transformer/large-base""": ( """https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json""" ), """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""", """funnel-transformer/xlarge-base""": ( """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json""" ), }, } UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": 512 for name in _model_names} UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names} class __lowerCAmelCase ( UpperCamelCase__): _lowercase : str = VOCAB_FILES_NAMES _lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowercase : Dict = PRETRAINED_INIT_CONFIGURATION _lowercase : Union[str, Any] = FunnelTokenizer _lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : int = 2 def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__=True , 
lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__="##" , **lowerCAmelCase__ , ) -> Optional[int]: '''simple docstring''' super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , clean_text=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , wordpieces_prefix=lowerCAmelCase__ , **lowerCAmelCase__ , ) a__ : Optional[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars ): a__ : List[str] =getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) ) a__ : Union[str, Any] =do_lower_case a__ : Any =strip_accents a__ : Optional[Any] =tokenize_chinese_chars a__ : Dict =normalizer_class(**lowerCAmelCase__ ) a__ : Any =do_lower_case def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> str: '''simple docstring''' a__ : Dict =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: '''simple docstring''' a__ : Optional[int] =[self.sep_token_id] a__ : Union[str, Any] =[self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: '''simple docstring''' a__ : Tuple 
=self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
95
0
# Fairseq wav2vec2 -> Hugging Face Wav2Vec2 checkpoint conversion script.
#
# NOTE(review): this file has been through an automated identifier scrambler.
# Every helper below is defined as `lowerCAmelCase` (each def shadowing the
# previous one) while the call sites still use the original names
# (read_txt_into_dict, rename_dict, set_recursively, load_wavaveca_layer,
# load_conv_layer, recursively_load_weights, convert_wavaveca_checkpoint);
# several `def` headers also repeat the same parameter name, which is a
# SyntaxError. The code is documented as-is; the original identifiers must be
# restored before it can run. Indentation below is reconstructed from the
# collapsed dump — TODO confirm against upstream.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConfig,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaForCTC,
    WavaVecaForPreTraining,
    WavaVecaProcessor,
    logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification


logging.set_verbosity_info()
# Module logger (NOTE(review): immediately shadowed by the two assignments below,
# yet the functions refer to `logger` — another casualty of the renamer).
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# fairseq state-dict key fragment -> HF Wav2Vec2 attribute path ("*" is a
# per-layer wildcard filled in by the loader below).
_lowerCAmelCase : Optional[Any] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """adapter_layer""": """encoder.layers.*.adapter_layer""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
    """pooling_layer.linear""": """projector""",
    """pooling_layer.projection""": """classifier""",
}
# HF attribute paths that are NOT nested under the `wav2vec2.` prefix.
_lowerCAmelCase : str = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """projector""",
    """classifier""",
]


# read_txt_into_dict: builds {line_number: first_word} from a text file
# (used for sequence-classification id2label).
# NOTE(review): `enumerate(_lowerCAmelCase)` iterates the *path string*, not
# the opened `file` handle, and `words`/`value`/`result` are never bound —
# the renamer collapsed the locals. Presumably the intent is
# `for line_number, line in enumerate(file)` with `result[key] = value`.
def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] ):
    """simple docstring"""
    UpperCAmelCase__ = {}
    with open(_lowerCAmelCase , "r" ) as file:
        for line_number, line in enumerate(_lowerCAmelCase ):
            UpperCAmelCase__ = line.strip()
            if line:
                UpperCAmelCase__ = line.split()
                UpperCAmelCase__ = line_number
                UpperCAmelCase__ = words[0]
                UpperCAmelCase__ = value
    return result


# set_recursively(hf_model, key, value, full_name, hf_param_name):
# walks dotted `key` into the HF model, validates the tensor shape, and
# assigns `value` to the matching weight/bias/param attribute.
# NOTE(review): all five parameters were renamed to the same identifier —
# SyntaxError as written; locals like `key`, `full_name`, `weight_type`,
# `hf_pointer` are the intended names.
def lowerCAmelCase ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
    """simple docstring"""
    for attribute in key.split("." ):
        UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase )
    UpperCAmelCase__ = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_lowerCAmelCase ):
            UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
            UpperCAmelCase__ = "param"
    if weight_type is not None and weight_type != "param":
        UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
    elif weight_type is not None and weight_type == "param":
        UpperCAmelCase__ = hf_pointer
        for attribute in hf_param_name.split("." ):
            UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ = shape_pointer.shape
        # let's reduce dimension
        UpperCAmelCase__ = value[0]
    else:
        UpperCAmelCase__ = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}'''
        )
    if weight_type == "weight":
        UpperCAmelCase__ = value
    elif weight_type == "weight_g":
        UpperCAmelCase__ = value
    elif weight_type == "weight_v":
        UpperCAmelCase__ = value
    elif weight_type == "bias":
        UpperCAmelCase__ = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            UpperCAmelCase__ = getattr(_lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ = value
    else:
        UpperCAmelCase__ = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )


# rename_dict(key, value, full_name, weight_type, hf_dict):
# computes the flat HF state-dict key for a fairseq tensor and stores it in
# `hf_dict` (dict-based variant of set_recursively, used by the fast path).
# NOTE(review): same duplicated-parameter SyntaxError as above.
def lowerCAmelCase ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
    """simple docstring"""
    UpperCAmelCase__ = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_lowerCAmelCase ):
            UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
            UpperCAmelCase__ = "param"
    if weight_type is not None and weight_type != "param":
        UpperCAmelCase__ = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        UpperCAmelCase__ = ".".join([key, hf_param_name] )
    else:
        UpperCAmelCase__ = key
    UpperCAmelCase__ = value if "lm_head" in full_key else value[0]


# Adapter sub-module key mapping (fairseq short names -> HF linear/norm names).
_lowerCAmelCase : int = {
    """W_a""": """linear_1.weight""",
    """W_b""": """linear_2.weight""",
    """b_a""": """linear_1.bias""",
    """b_b""": """linear_2.bias""",
    """ln_W""": """norm.weight""",
    """ln_b""": """norm.bias""",
}


# load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
# tries every MAPPING entry against the fairseq tensor name; on a match,
# resolves the "*" layer index, classifies the tensor (weight_g/weight_v/
# bias/weight) and dispatches to rename_dict or set_recursively.
# Returns whether the tensor was consumed.
def lowerCAmelCase ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]=None ):
    """simple docstring"""
    UpperCAmelCase__ = False
    for key, mapped_key in MAPPING.items():
        UpperCAmelCase__ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            UpperCAmelCase__ = True
            if "*" in mapped_key:
                UpperCAmelCase__ = name.split(_lowerCAmelCase )[0].split("." )[-2]
                UpperCAmelCase__ = mapped_key.replace("*" , _lowerCAmelCase )
            if "weight_g" in name:
                UpperCAmelCase__ = "weight_g"
            elif "weight_v" in name:
                UpperCAmelCase__ = "weight_v"
            elif "bias" in name:
                UpperCAmelCase__ = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                UpperCAmelCase__ = "weight"
            else:
                UpperCAmelCase__ = None
            if hf_dict is not None:
                rename_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            else:
                set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            return is_used
    return is_used


# recursively_load_weights(fairseq_model, hf_model, is_headless):
# walks the whole fairseq state dict, routing conv-extractor tensors to
# load_conv_layer and everything else to load_wavaveca_layer; logs leftovers.
def lowerCAmelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
    """simple docstring"""
    UpperCAmelCase__ = []
    UpperCAmelCase__ = fairseq_model.state_dict()
    UpperCAmelCase__ = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        UpperCAmelCase__ = False
        if "conv_layers" in name:
            load_conv_layer(
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
            UpperCAmelCase__ = True
        else:
            UpperCAmelCase__ = load_wavaveca_layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        if not is_used:
            unused_weights.append(_lowerCAmelCase )
    logger.warning(F'''Unused weights: {unused_weights}''' )


# load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
# places a conv feature-extractor tensor (conv weight/bias or layer-norm
# weight/bias, selected by the fairseq `type_id`) after shape validation.
def lowerCAmelCase ( _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ):
    """simple docstring"""
    UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
    UpperCAmelCase__ = name.split("." )
    UpperCAmelCase__ = int(items[0] )
    UpperCAmelCase__ = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
                )
            UpperCAmelCase__ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
                )
            UpperCAmelCase__ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
                )
            UpperCAmelCase__ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
                )
            UpperCAmelCase__ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(_lowerCAmelCase )


# convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path,
#                             config_path=None, dict_path=None,
#                             is_finetuned=True, is_seq_class=False):
# top-level entry point: builds the target HF model (CTC, pretraining, or
# sequence classification), loads the fairseq ensemble, copies weights, and
# saves the HF checkpoint (plus tokenizer/feature extractor when fine-tuned).
@torch.no_grad()
def lowerCAmelCase ( _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int=False ):
    """simple docstring"""
    if config_path is not None:
        UpperCAmelCase__ = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
    else:
        UpperCAmelCase__ = WavaVecaConfig()
    if is_seq_class:
        UpperCAmelCase__ = read_txt_into_dict(_lowerCAmelCase )
        UpperCAmelCase__ = idalabel
        UpperCAmelCase__ = WavaVecaForSequenceClassification(_lowerCAmelCase )
        UpperCAmelCase__ = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
        feature_extractor.save_pretrained(_lowerCAmelCase )
    elif is_finetuned:
        if dict_path:
            UpperCAmelCase__ = Dictionary.load(_lowerCAmelCase )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            UpperCAmelCase__ = target_dict.pad_index
            UpperCAmelCase__ = target_dict.bos_index
            UpperCAmelCase__ = target_dict.eos_index
            UpperCAmelCase__ = len(target_dict.symbols )
            UpperCAmelCase__ = os.path.join(_lowerCAmelCase , "vocab.json" )
            if not os.path.isdir(_lowerCAmelCase ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) )
                return
            os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
            UpperCAmelCase__ = target_dict.indices
            # fairseq has the <pad> and <s> switched
            UpperCAmelCase__ = 0
            UpperCAmelCase__ = 1
            with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(_lowerCAmelCase , _lowerCAmelCase )
            UpperCAmelCase__ = WavaVecaCTCTokenizer(
                _lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , )
            UpperCAmelCase__ = True if config.feat_extract_norm == "layer" else False
            UpperCAmelCase__ = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
            UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
            processor.save_pretrained(_lowerCAmelCase )
        UpperCAmelCase__ = WavaVecaForCTC(_lowerCAmelCase )
    else:
        UpperCAmelCase__ = WavaVecaForPreTraining(_lowerCAmelCase )
    if is_finetuned or is_seq_class:
        UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
        UpperCAmelCase__ = fairseq.tasks.setup_task(_lowerCAmelCase )
        UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase )
    UpperCAmelCase__ = model[0].eval()
    recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned )
    hf_wavavec.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    # NOTE(review): the parser is bound to `_lowerCAmelCase` but used as
    # `parser` / `args` below — renamer damage again.
    _lowerCAmelCase : Any = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    _lowerCAmelCase : Union[str, Any] = parser.parse_args()
    _lowerCAmelCase : Optional[Any] = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
169
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __lowerCAmelCase ( AbstractDatasetReader):
    """Dataset reader that builds a :class:`Dataset` from a Spark DataFrame.

    Fixes relative to the previous revision: the base class was the undefined
    name ``UpperCamelCase__`` (now ``AbstractDatasetReader``, which is already
    imported above), the ``__init__`` signature repeated one parameter name
    (a SyntaxError), and the constructor stored its state in throwaway locals
    even though the read method relies on ``self.builder``,
    ``self._load_from_cache_file`` and ``self._file_format``.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ) -> None:
        """Wrap ``df`` in a Spark dataset builder.

        Args:
            df: Source Spark DataFrame.
            split: Split name to expose the data under.
            features: Optional explicit feature schema.
            streaming: If True, ``read`` returns a streaming dataset.
            cache_dir: Where to cache the prepared dataset.
            keep_in_memory: Keep the prepared dataset in memory.
            working_dir: Scratch directory for the Spark builder.
            load_from_cache_file: If False, force re-preparation.
            file_format: On-disk format used when materializing ("arrow").
        """
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def _lowercase ( self ) -> str:
        """Materialize (or stream) the dataset for the configured split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        # FORCE_REDOWNLOAD bypasses any previously prepared cache.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split )
95
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder.

    Fix: both classes in this module previously shared the same mangled name
    (the second shadowing the first) and the model class referenced the
    then-undefined ``MCLIPConfig`` — the canonical names are restored so the
    reference resolves.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # transformerDimSize: hidden size of the XLM-R text transformer.
        # imageDimSize: dimensionality of the shared image/text space.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder projected into the CLIP embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        # Linear projection from transformer hidden size to CLIP space.
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected pooled embedding, token-level hidden states).

        Pooling is a mask-weighted mean over the sequence dimension.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs

    # Backward-compatible alias for the previous (mangled) method name.
    snake_case_ = forward
156
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc.

    Fixes: the function was defined as ``_A`` with a duplicated parameter
    name (a SyntaxError) while the ``__main__`` block called ``arc_length``;
    the intended name and distinct parameters are restored.

    Args:
        angle: Central angle of the arc, in degrees.
        radius: Radius of the circle.

    Returns:
        The fraction ``angle / 360`` of the full circumference
        ``2 * pi * radius``.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
95
0
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def snake_case ( A__ ): return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code ) class UpperCamelCase_ (UpperCamelCase__ ): @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : List[str] ) -> Tuple: UpperCAmelCase_ : List[str] = parser.add_parser("download" ) download_parser.add_argument( "--cache-dir" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="Path to location to store the models" ) download_parser.add_argument( "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" ) download_parser.add_argument( "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , ) download_parser.add_argument("model" , type=lowerCAmelCase__ , help="Name of the model to download" ) download_parser.set_defaults(func=lowerCAmelCase__ ) def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Tuple = model UpperCAmelCase_ : Optional[int] = cache UpperCAmelCase_ : Any = force UpperCAmelCase_ : Dict = trust_remote_code def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
268
"""Convert an original XLNet TensorFlow checkpoint to a PyTorch checkpoint.

Fixes relative to the previous revision: the conversion function was defined
as ``_A`` with four identically named parameters (a SyntaxError) while the
``__main__`` block called ``convert_xlnet_checkpoint_to_pytorch``; the GLUE
label table was bound to a mangled name although the function body reads
``GLUE_TASKS_NUM_LABELS``; and the collapsed locals made every
``os.path.join`` target the same placeholder. Canonical names are restored.
"""
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of labels per GLUE task, used to size the classification head.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Load TF weights into the matching XLNet head and save a PyTorch dump.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        xlnet_config_file: JSON config describing the architecture.
        pytorch_dump_folder_path: Output folder for weights + config.
        finetuning_task: Optional task name; selects a sequence-classification
            head (GLUE tasks), a QA head ("squad"), or the LM head (default).
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--xlnet_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained XLNet model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--finetuning_task""",
        default=None,
        type=str,
        help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
95
0
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Combined BridgeTower processor wrapping an image processor and a
    RoBERTa tokenizer behind a single ``__call__``.

    Fixes relative to the previous revision: the base class was the undefined
    name ``UpperCamelCase__`` (``ProcessorMixin`` is imported above), the
    three class attributes all shared one mangled name (shadowing each other,
    so ``ProcessorMixin`` could not resolve its components), every parameter
    of ``__call__`` was named ``A`` (a SyntaxError), and the utility methods
    all shared one name. Canonical identifiers are restored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images`` into one encoding.

        Returns a :class:`BatchEncoding` holding the tokenizer outputs plus
        ``pixel_values``/``pixel_mask`` from the image processor.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Deduplicated union of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
111
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration class for CANINE (character-level) models.

    Fixes relative to the previous revision: the class inherited from the
    undefined name ``UpperCamelCase__`` (``PretrainedConfig`` is imported
    above) and every ``__init__`` parameter shared a single mangled name —
    a SyntaxError. Parameter names are reconstructed from the body's
    assignment order and the canonical CANINE defaults.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Transformer backbone hyper-parameters.
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config: hashing-based character embeddings and the
        # down/up-sampling around the deep transformer stack.
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
95
0
"""Convert a saved PyTorch state dict to fp16 in place (or to a new path).

Fixes relative to the previous revision: the function was defined under a
mangled name with three identically named parameters (a SyntaxError) while
``fire.Fire(convert)`` expected ``convert``, and the result of ``v.half()``
was assigned to a throwaway local instead of being written back into the
state dict, so the saved file would have been unchanged.
"""
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Halve every tensor in the state dict at ``src_path`` and save it.

    Args:
        src_path: Path to a saved state dict (e.g. ``pytorch_model.bin``).
        map_location: Device mapping passed to ``torch.load``.
        save_path: Output path; defaults to overwriting ``src_path``.

    Raises:
        TypeError: If any value in the loaded object is not a tensor
            (i.e. the file is not a plain state dict).
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
138
# Nightly GPU integration tests for the VersatileDiffusion pipeline.
#
# NOTE(review): this file shows automated-renamer damage: both test classes
# share the name `__lowerCAmelCase` (the second shadows the first), every
# method is named `_lowercase` (so only the last def per class survives),
# locals are all bound to `a__` while later statements read the intended
# names (`pipe`, `generator`, `image`, ...), and arguments are passed as the
# undefined `lowerCAmelCase__`. Documented as-is; the original identifiers
# must be restored before these tests can run. Indentation reconstructed
# from the collapsed dump.
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


UpperCAmelCase : int = False


class __lowerCAmelCase ( unittest.TestCase):
    # Placeholder for fast (non-nightly) tests; intentionally empty.
    pass


@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
    # Presumably tearDown: frees GPU memory between tests — TODO confirm
    # the original method name.
    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Save/reload round-trip: the dual-guided output must be identical
    # before and after `save_pretrained` / `from_pretrained`.
    def _lowercase ( self ) -> Any:
        '''simple docstring'''
        a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        a__ : int =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        a__ : Optional[Any] =torch.manual_seed(0 )
        a__ : Optional[Any] =pipe.dual_guided(
            prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCAmelCase__ )
            a__ : str =VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa )
            pipe.to(lowerCAmelCase__ )
            pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        # Re-seed so the reloaded pipeline sees the same noise.
        a__ : Optional[Any] =generator.manual_seed(0 )
        a__ : Tuple =pipe.dual_guided(
            prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    # End-to-end smoke test of the three pipeline modes (dual-guided,
    # text-to-image, image variation) against pinned reference slices.
    def _lowercase ( self ) -> Any:
        '''simple docstring'''
        a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        a__ : Optional[Any] ="cyberpunk 2077"
        a__ : int =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        a__ : Union[str, Any] =torch.manual_seed(0 )
        a__ : Tuple =pipe.dual_guided(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
        a__ : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        # Reference pixel slice for the dual-guided mode.
        a__ : Any =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        a__ : str ="A painting of a squirrel eating a burger "
        a__ : Optional[int] =torch.manual_seed(0 )
        a__ : str =pipe.text_to_image(
            prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
        a__ : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        # Reference pixel slice for the text-to-image mode.
        a__ : Optional[int] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        a__ : Optional[Any] =pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy" ).images
        a__ : Union[str, Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        # Reference pixel slice for the image-variation mode.
        a__ : Any =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
95
0
"""simple docstring""" # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib _lowerCamelCase : Optional[Any] = get_logger() _lowerCamelCase : Optional[dict] = None class lowercase ( TensorFormatter[Mapping, """jax.Array""", Mapping]): def __init__( self : int , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=None , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" super().__init__(features=lowerCAmelCase__ ) import jax from jaxlib.xla_client import Device if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError( F"""Expected {device} to be a `str` not {type(lowerCAmelCase__ )}, as `jaxlib.xla_extension.Device` """ '''is not serializable neither with `pickle` nor with `dill`. 
Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''' ) A_ : List[Any] = device if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ : int = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F"""Device with string identifier {self.device} not listed among the available """ F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """ F"""device: {str(jax.devices()[0] )}.""" ) A_ : int = str(jax.devices()[0] ) A_ : Union[str, Any] = jnp_array_kwargs @staticmethod def a_ ( ): """simple docstring""" import jax return {str(lowerCAmelCase__ ): device for device in jax.devices()} def a_ ( self : Dict , _lowerCamelCase : List[Any] ): """simple docstring""" import jax import jax.numpy as jnp if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and column: if all( isinstance(lowerCAmelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(lowerCAmelCase__ , axis=0 ) return column def a_ ( self : Optional[Any] , _lowerCamelCase : List[Any] ): """simple docstring""" import jax import jax.numpy as jnp if isinstance(lowerCAmelCase__ , (str, bytes, type(lowerCAmelCase__ )) ): return value elif isinstance(lowerCAmelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ : Optional[Any] = {} if isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision 
if jax.config.jax_enable_xaa: A_ : Optional[Any] = {"dtype": jnp.intaa} else: A_ : Dict = {"dtype": jnp.intaa} elif isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ : Tuple = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCAmelCase__ , PIL.Image.Image ): A_ : Tuple = np.asarray(lowerCAmelCase__ ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ : List[str] = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(lowerCAmelCase__ , **{**default_dtype, **self.jnp_array_kwargs} ) def a_ ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" import jax # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(lowerCAmelCase__ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(lowerCAmelCase__ , '''__array__''' ) and not isinstance(lowerCAmelCase__ , jax.Array ): A_ : List[Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCAmelCase__ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] ) elif isinstance(lowerCAmelCase__ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] ) return self._tensorize(lowerCAmelCase__ ) def a_ ( self : Tuple , _lowerCamelCase : Any ): """simple docstring""" return map_nested(self._recursive_tensorize , lowerCAmelCase__ , map_list=lowerCAmelCase__ ) def a_ ( self : Union[str, Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Dict = self.numpy_arrow_extractor().extract_row(lowerCAmelCase__ ) A_ : List[Any] = self.python_features_decoder.decode_row(lowerCAmelCase__ ) return self.recursive_tensorize(lowerCAmelCase__ ) def a_ ( self : Any , _lowerCamelCase : Any ): """simple docstring""" A_ : Optional[int] = self.numpy_arrow_extractor().extract_column(lowerCAmelCase__ ) A_ : str = self.python_features_decoder.decode_column(lowerCAmelCase__ , pa_table.column_names[0] ) A_ : int = self.recursive_tensorize(lowerCAmelCase__ ) A_ : Tuple = self._consolidate(lowerCAmelCase__ ) return column def a_ ( self : Optional[int] , _lowerCamelCase : Any ): """simple docstring""" A_ : Optional[int] = self.numpy_arrow_extractor().extract_batch(lowerCAmelCase__ ) A_ : Any = self.python_features_decoder.decode_batch(lowerCAmelCase__ ) A_ : List[str] = self.recursive_tensorize(lowerCAmelCase__ ) for column_name in batch: A_ : int = 
self._consolidate(batch[column_name] ) return batch
167
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class for streamers: `generate()` pushes new token ids through
    `put()` and signals completion through `end()`."""

    def put(self, value):
        """Receive a new batch of generated token ids."""
        raise NotImplementedError()

    def end(self):
        """Flush any buffered state when generation is finished."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streams decoded text to stdout as soon as complete pieces are available.

    Args:
        tokenizer: tokenizer used to decode the generated token ids.
        skip_prompt: if True, the first `put()` call (the prompt) is ignored.
        **decode_kwargs: forwarded to ``tokenizer.decode``.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decode the accumulated token ids and emit any newly finalized text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush whatever text remains in the cache and signal end of stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print the finalized text; subclasses override this hook."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode
        # block (https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block))
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text in a queue, usable as an iterator
    from a consumer thread while `generate()` runs in another thread.

    Args:
        tokenizer: tokenizer used to decode the generated token ids.
        skip_prompt: if True, the first `put()` call (the prompt) is ignored.
        timeout: queue timeout in seconds; None blocks indefinitely.
        **decode_kwargs: forwarded to ``tokenizer.decode``.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        # None marks the end of the stream in the queue.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the finalized text in the queue; add the stop signal at the end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
95
0
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=3_0 , __lowercase=2 , __lowercase=3 , __lowercase=True , __lowercase=True , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1_0 , __lowercase=0.02 , __lowercase=None , ) -> List[str]: lowerCAmelCase_ : int = parent lowerCAmelCase_ : Dict = batch_size lowerCAmelCase_ : Any = image_size lowerCAmelCase_ : int = patch_size lowerCAmelCase_ : Union[str, Any] = num_channels lowerCAmelCase_ : List[str] = is_training lowerCAmelCase_ : str = use_labels lowerCAmelCase_ : Tuple = hidden_size lowerCAmelCase_ : str = num_hidden_layers lowerCAmelCase_ : Dict = num_attention_heads lowerCAmelCase_ : Union[str, Any] = intermediate_size lowerCAmelCase_ : int = hidden_act lowerCAmelCase_ : str = hidden_dropout_prob lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase_ : Any = type_sequence_label_size lowerCAmelCase_ : List[str] = initializer_range lowerCAmelCase_ : Tuple = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ : Optional[int] = (image_size // patch_size) ** 2 
lowerCAmelCase_ : Optional[Any] = num_patches + 1 def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : Any = None if self.use_labels: lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : Tuple = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> List[str]: return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple: lowerCAmelCase_ : Any = ViTMSNModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowerCAmelCase_ : Dict = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> str: lowerCAmelCase_ : Optional[Any] = self.type_sequence_label_size lowerCAmelCase_ : Union[str, Any] = ViTMSNForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowerCAmelCase_ : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' ) print('''Labels: {labels}''' ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase_ : Tuple = 1 lowerCAmelCase_ : str = ViTMSNForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowerCAmelCase_ : 
List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase_ : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs() lowerCAmelCase_ : str = config_and_inputs lowerCAmelCase_ : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case__( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Optional[int] = ( {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : List[Any] = False def lowercase_ ( self ) -> str: lowerCAmelCase_ : int = ViTMSNModelTester(self ) lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 ) def lowercase_ ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' ) def lowercase_ ( self ) -> Optional[Any]: pass def lowercase_ ( self ) -> str: lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Optional[Any] = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase_ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Optional[int] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : int = model_class(lowerCAmelCase__ ) lowerCAmelCase_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : int = [*signature.parameters.keys()] lowerCAmelCase_ : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def lowercase_ ( self ) -> List[Any]: for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Optional[int] = ViTMSNModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def lowerCAmelCase ( )-> Optional[int]: lowerCAmelCase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Dict: return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None @slow def lowercase_ ( self ) -> Union[str, Any]: torch.manual_seed(2 ) lowerCAmelCase_ : Tuple = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = self.default_image_processor lowerCAmelCase_ : Tuple = prepare_img() lowerCAmelCase_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): lowerCAmelCase_ : Tuple = model(**lowerCAmelCase__ ) # verify the logits lowerCAmelCase_ : 
Tuple = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
262
def _A ( SCREAMING_SNAKE_CASE : int = 50 ): """simple docstring""" a__ : Any =[1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
95
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase__ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE : list ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return [] a__ , a__ : int =min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE ) a__ : Optional[int] =int(max_value - min_value ) + 1 a__ : list[list] =[[] for _ in range(SCREAMING_SNAKE_CASE )] for i in my_list: buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE ) return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
95
0
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowerCAmelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _UpperCamelCase ( self ) -> Tuple: snake_case_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) snake_case_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) snake_case_ = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) snake_case_ = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) snake_case_ = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior snake_case_ = text_classifier('This is great !' 
, return_all_scores=lowerCAmelCase__ ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) snake_case_ = text_classifier('This is great !' , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) snake_case_ = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) snake_case_ = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def _UpperCamelCase ( self ) -> Dict: import torch snake_case_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) snake_case_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def _UpperCamelCase ( self ) -> Any: snake_case_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) snake_case_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def _UpperCamelCase ( self ) -> str: snake_case_ = pipeline('text-classification' ) snake_case_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) snake_case_ = text_classifier('This is bad !' 
) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) snake_case_ = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def _UpperCamelCase ( self ) -> Tuple: snake_case_ = pipeline('text-classification' , framework='tf' ) snake_case_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) snake_case_ = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) snake_case_ = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def _UpperCamelCase ( self , a , a , a ) -> List[Any]: snake_case_ = TextClassificationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def _UpperCamelCase ( self , a , a ) -> List[str]: snake_case_ = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 snake_case_ = "HuggingFace is in" snake_case_ = text_classifier(lowerCAmelCase__ ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) snake_case_ = ["HuggingFace is in ", "Paris is in France"] snake_case_ = text_classifier(lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}, {'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the 
legacy format snake_case_ = text_classifier(lowerCAmelCase__ , top_k=lowerCAmelCase__ ) snake_case_ = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [[{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] * N, [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] * N] , ) snake_case_ = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} snake_case_ = text_classifier(lowerCAmelCase__ ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , {'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. snake_case_ = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(lowerCAmelCase__ ): text_classifier(lowerCAmelCase__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility snake_case_ = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [{'label': ANY(lowerCAmelCase__ ), 'score': ANY(lowerCAmelCase__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
178
import numpy as np def _A ( SCREAMING_SNAKE_CASE : np.array ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
95
0
"""Text-summarization tool built on a seq2seq (BART) summarization model."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class __lowerCamelCase(PipelineTool):
    """Tool that summarizes an English text with a BART summarization model.

    Restored vs. the degraded original: the base class was the undefined name
    `UpperCamelCase__`, every class attribute was named `A_` and every method
    `_UpperCAmelCase` (each definition shadowing the previous one), so the
    tool could not work.  Attribute and method names follow the PipelineTool
    contract (`encode` / `forward` / `decode`).
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Turn the generated token ids back into clean text."""
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
320
import numpy # List of input, output pairs UpperCAmelCase : str = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCAmelCase : Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150)) UpperCAmelCase : str = [2, 4, 1, 5] UpperCAmelCase : List[str] = len(train_data) UpperCAmelCase : Dict = 0.0_0_9 def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="train" ): """simple docstring""" return calculate_hypothesis_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - output( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def _A ( SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" a__ : Tuple =0 for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=m ): """simple docstring""" a__ : Any =0 for i in range(SCREAMING_SNAKE_CASE ): if index == -1: summation_value += _error(SCREAMING_SNAKE_CASE ) else: summation_value += _error(SCREAMING_SNAKE_CASE ) * train_data[i][0][index] return summation_value def _A ( SCREAMING_SNAKE_CASE : int ): """simple docstring""" a__ : Any =summation_of_cost_derivative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / m return cost_derivative_value def _A ( ): """simple docstring""" global parameter_vector # Tune these values to set a tolerance value for predicted output a__ : Dict =0.0_0_0_0_0_2 a__ : Union[str, Any] =0 a__ : 
Any =0 while True: j += 1 a__ : Any =[0, 0, 0, 0] for i in range(0 , len(SCREAMING_SNAKE_CASE ) ): a__ : Tuple =get_cost_derivative(i - 1 ) a__ : List[Any] =( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE , rtol=SCREAMING_SNAKE_CASE , ): break a__ : Optional[Any] =temp_parameter_vector print(("Number of iterations:", j) ) def _A ( ): """simple docstring""" for i in range(len(SCREAMING_SNAKE_CASE ) ): print(("Actual output value:", output(SCREAMING_SNAKE_CASE , "test" )) ) print(("Hypothesis output:", calculate_hypothesis_value(SCREAMING_SNAKE_CASE , "test" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
95
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) def UpperCAmelCase ( a_ , a_=False ) -> List[str]: """simple docstring""" __A = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if 
base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def UpperCAmelCase ( a_ , a_ , a_=False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __A = "" else: __A = "vit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) __A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __A = in_proj_weight[ : config.hidden_size, : ] __A = in_proj_bias[: config.hidden_size] __A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __A = in_proj_weight[ -config.hidden_size :, : ] __A = in_proj_bias[-config.hidden_size :] def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" __A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(a_ , a_ ) def UpperCAmelCase ( a_ , a_ , a_ ) -> Union[str, Any]: """simple docstring""" __A = dct.pop(a_ ) __A = val def UpperCAmelCase ( ) -> str: """simple docstring""" __A = "http://images.cocodataset.org/val2017/000000039769.jpg" __A = Image.open(requests.get(a_ , stream=a_ ).raw ) return im @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_=True ) -> List[Any]: """simple docstring""" __A = ViTConfig() # patch_size if model_name[-1] == "8": __A = 8 # set labels if required if not base_model: __A = 1_0_0_0 __A = "huggingface/label-files" __A = 
"imagenet-1k-id2label.json" __A = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) ) __A = {int(a_ ): v for k, v in idalabel.items()} __A = idalabel __A = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __A = 3_8_4 __A = 1_5_3_6 __A = 1_2 __A = 6 # load original model from torch hub __A = torch.hub.load("facebookresearch/dino:main" , a_ ) original_model.eval() # load state_dict of original model, remove and rename some keys __A = original_model.state_dict() if base_model: remove_classification_head_(a_ ) __A = create_rename_keys(a_ , base_model=a_ ) for src, dest in rename_keys: rename_key(a_ , a_ , a_ ) read_in_q_k_v(a_ , a_ , a_ ) # load HuggingFace model if base_model: __A = ViTModel(a_ , add_pooling_layer=a_ ).eval() else: __A = ViTForImageClassification(a_ ).eval() model.load_state_dict(a_ ) # Check outputs on an image, prepared by ViTImageProcessor __A = ViTImageProcessor() __A = image_processor(images=prepare_img() , return_tensors="pt" ) __A = encoding["pixel_values"] __A = model(a_ ) if base_model: __A = original_model(a_ ) assert torch.allclose(a_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: __A = original_model(a_ ) assert logits.shape == outputs.logits.shape assert torch.allclose(a_ , outputs.logits , atol=1E-3 ) Path(a_ ).mkdir(exist_ok=a_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) SCREAMING_SNAKE_CASE :Tuple = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
15
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" a__ : Optional[Any] =len(SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr a__ : List[Any] =arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi a__ : int =arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )] # Reverse whole list a__ : List[str] =arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": UpperCAmelCase : int = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase : Optional[int] = [int(item) for item in user_input.split(""",""")] print(pancake_sort(unsorted))
95
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import table: submodule name -> public names it exports. The obfuscated
# original assigned these lists to throwaway variables and then passed an
# undefined `_import_structure` to _LazyModule; this restores the standard
# transformers __init__ layout implied by the string keys below.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch modeling files are only exported when torch is installed.
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    # TensorFlow counterparts of the vision model.
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule (else-branch below).
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks are only
    # imported when one of the exported names is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
169
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


# NOTE(review): this test class has been mechanically obfuscated: every
# assignment target was renamed to `a__` and every method to `_lowercase`,
# while the *uses* of the original names (`self.tmpdirname`, `lowerCAmelCase__`,
# `self.get_tokenizer`, `processor`, ...) were left in place. As written it
# raises NameError/AttributeError at runtime (and the return annotations such
# as `Dict` are undefined, so class creation itself fails). The code is left
# byte-identical below; comments only describe the apparent intent of each
# method — TODO: restore the original names before relying on this file.
@require_vision
class __lowerCAmelCase(unittest.TestCase):
    def _lowercase(self) -> Dict:
        """setUp: write a tiny CLIP vocab/merges and a ViT image-processor config into a temp dir."""
        a__ : Any = tempfile.mkdtemp()
        # fmt: off
        a__ : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        a__ : str = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
        a__ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        a__ : Optional[int] = {"unk_token": "<unk>"}
        a__ : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        a__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(lowerCAmelCase__) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(lowerCAmelCase__))
        a__ : Optional[Any] = {
            "do_resize": True,
            "size": 2_0,
            "do_center_crop": True,
            "crop_size": 1_8,
            "do_normalize": True,
            "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        a__ : Dict = os.path.join(self.tmpdirname, lowerCAmelCase__)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(lowerCAmelCase__, lowerCAmelCase__)

    def _lowercase(self, **lowerCAmelCase__) -> Union[str, Any]:
        """get_tokenizer: load the slow CLIP tokenizer from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _lowercase(self, **lowerCAmelCase__) -> List[Any]:
        """get_rust_tokenizer: load the fast CLIP tokenizer from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _lowercase(self, **lowerCAmelCase__) -> Any:
        """get_image_processor: load the ViT image processor from the temp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _lowercase(self) -> Optional[int]:
        """tearDown: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname)

    def _lowercase(self) -> Any:
        """prepare_image_inputs: one random 3x30x400 uint8 image as a PIL Image."""
        # NOTE(review): `np.uinta` is the obfuscated form of `np.uint8`.
        a__ : Optional[Any] = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta)]
        a__ : List[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs]
        return image_inputs

    def _lowercase(self) -> Dict:
        """Save/load round-trip: slow and fast processors survive save_pretrained/from_pretrained."""
        a__ : Union[str, Any] = self.get_tokenizer()
        a__ : int = self.get_rust_tokenizer()
        a__ : List[str] = self.get_image_processor()
        a__ : Dict = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        processor_slow.save_pretrained(self.tmpdirname)
        a__ : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
        a__ : Tuple = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        processor_fast.save_pretrained(self.tmpdirname)
        a__ : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
        self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
        self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)

    def _lowercase(self) -> Any:
        """Save/load with extra kwargs: overridden special tokens and processor options survive."""
        a__ : List[str] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        a__ : str = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        a__ : int = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
        a__ : Optional[Any] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=lowerCAmelCase__, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, lowerCAmelCase__)

    def _lowercase(self) -> List[Any]:
        """Image path equivalence: processor(images=...) matches the bare image processor output."""
        a__ : str = self.get_image_processor()
        a__ : Optional[int] = self.get_tokenizer()
        a__ : Dict = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        a__ : str = self.prepare_image_inputs()
        a__ : Any = image_processor(lowerCAmelCase__, return_tensors="np")
        a__ : Optional[int] = processor(images=lowerCAmelCase__, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def _lowercase(self) -> Union[str, Any]:
        """Text path equivalence: processor(text=...) matches the bare tokenizer output."""
        a__ : Optional[int] = self.get_image_processor()
        a__ : List[Any] = self.get_tokenizer()
        a__ : Optional[int] = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        a__ : Union[str, Any] = "lower newer"
        a__ : List[str] = processor(text=lowerCAmelCase__)
        a__ : str = tokenizer(lowerCAmelCase__)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _lowercase(self) -> Optional[int]:
        """Text + image call returns all expected keys; empty call raises."""
        a__ : Any = self.get_image_processor()
        a__ : Dict = self.get_tokenizer()
        a__ : Union[str, Any] = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        a__ : Dict = "lower newer"
        a__ : int = self.prepare_image_inputs()
        a__ : Any = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase__):
            processor()

    def _lowercase(self) -> str:
        """Visual-prompt call returns pixel_values plus conditional_pixel_values; empty call raises."""
        a__ : Union[str, Any] = self.get_image_processor()
        a__ : Optional[Any] = self.get_tokenizer()
        a__ : str = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        a__ : int = self.prepare_image_inputs()
        a__ : Union[str, Any] = self.prepare_image_inputs()
        a__ : Tuple = processor(images=lowerCAmelCase__, visual_prompt=lowerCAmelCase__)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase__):
            processor()

    def _lowercase(self) -> Tuple:
        """batch_decode passthrough: processor.batch_decode delegates to the tokenizer."""
        a__ : Optional[int] = self.get_image_processor()
        a__ : Any = self.get_tokenizer()
        a__ : Tuple = CLIPSegProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
        a__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        a__ : Optional[Any] = processor.batch_decode(lowerCAmelCase__)
        a__ : Dict = tokenizer.batch_decode(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
95
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for the Speech2Text2 decoder.

    The obfuscated original repeated one parameter name 19 times (a
    SyntaxError) and assigned every value to a single local instead of
    ``self``; parameter names and attributes below are restored from the
    ``attribute_map``/defaults visible in the source. Class name kept as-is
    for compatibility with existing references.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config names onto this model's decoder-specific fields.
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        """Store decoder hyper-parameters and forward special-token ids to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Decoder-only model: hidden layer count mirrors decoder_layers.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
156
def _A ( SCREAMING_SNAKE_CASE : list ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError("Input series is not valid, valid series - [2, 4, 6]" ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError("Input list must be a non empty list" ) if len(SCREAMING_SNAKE_CASE ) == 1: return True a__ : Union[str, Any] =series[1] - series[0] for index in range(len(SCREAMING_SNAKE_CASE ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def _A ( SCREAMING_SNAKE_CASE : list ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError("Input series is not valid, valid series - [2, 4, 6]" ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError("Input list must be a non empty list" ) a__ : Any =0 for val in series: answer += val return answer / len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
95
0
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def snake_case ( ): UpperCAmelCase_ : Union[str, Any] = [randint(-10_00 ,10_00 ) for i in range(10 )] UpperCAmelCase_ : Optional[Any] = randint(-50_00 ,50_00 ) return (arr, r) lowerCamelCase_ = make_dataset() def snake_case ( A__ ,A__ ): for triplet in permutations(A__ ,3 ): if sum(A__ ) == target: return tuple(sorted(A__ ) ) return (0, 0, 0) def snake_case ( A__ ,A__ ): arr.sort() UpperCAmelCase_ : int = len(A__ ) for i in range(n - 1 ): UpperCAmelCase_ : Dict = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def snake_case ( ): UpperCAmelCase_ : Any = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" UpperCAmelCase_ : str = "\ntriplet_sum1(*dataset)\n" UpperCAmelCase_ : str = "\ntriplet_sum2(*dataset)\n" UpperCAmelCase_ : Optional[int] = repeat(setup=A__ ,stmt=A__ ,repeat=5 ,number=1_00_00 ) UpperCAmelCase_ : List[Any] = repeat(setup=A__ ,stmt=A__ ,repeat=5 ,number=1_00_00 ) return (min(A__ ), min(A__ )) if __name__ == "__main__": from doctest import testmod testmod() lowerCamelCase_ = solution_times() print(f'The time for naive implementation is {times[0]}.') print(f'The time for optimized implementation is {times[1]}.')
268
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder.

    Restored name: the obfuscated source gave both classes the same junk name
    while the model class below references `MCLIPConfig`.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Width of the transformer output / of the target (image) embedding
        # space — read back by MultilingualCLIP via config.transformerDimensions
        # and config.numDims.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-RoBERTa text encoder with a linear projection into the CLIP image space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, token embeddings)."""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Masked mean-pool over the sequence dimension before projecting.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
95
0
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


# NOTE(review): this module has been mechanically obfuscated — every
# assignment target was renamed (to `A__`, `__snake_case`, ...) while the
# uses of the original names (`EnvironmentCommand`, `download_parser`,
# `safetensors_version`, `info`, ...) were left intact, so the code raises
# NameError/SyntaxError as written. It is left byte-identical below; the
# comments describe the apparent intent only — TODO: restore original names.


def A__(SCREAMING_SNAKE_CASE__) -> Optional[int]:
    # Factory passed to the CLI parser; builds the command with no config file.
    # NOTE(review): `EnvironmentCommand` is never defined in this view —
    # presumably the class below originally bore that name.
    return EnvironmentCommand()


def A__(SCREAMING_SNAKE_CASE__) -> Any:
    # NOTE(review): redefines A__ above; presumably a second factory that
    # forwards the parsed `--accelerate-config_file` argument.
    return EnvironmentCommand(args.accelerate_config_file)


class __snake_case(UpperCamelCase__):
    """CLI `env` command: print versions of transformers and its optional backends."""

    @staticmethod
    def UpperCAmelCase__(A: List[Any]):
        # register_subcommand: add the `env` sub-parser and wire its callback.
        __snake_case: Tuple = parser.add_parser("""env""")
        download_parser.set_defaults(func=lowerCAmelCase__)
        download_parser.add_argument(
            """--accelerate-config_file""",
            default=lowerCAmelCase__,
            help="""The accelerate config file to use for the default values in the launching script.""",
        )
        download_parser.set_defaults(func=lowerCAmelCase__)

    def __init__(self: Union[str, Any], A: Optional[int], *A: Tuple):
        # NOTE(review): duplicate parameter name `A` — SyntaxError as written.
        # Presumably stores the accelerate config path on the instance
        # (read back as `self._accelerate_config_file` in run()).
        __snake_case: Optional[Any] = accelerate_config_file

    def UpperCAmelCase__(self: Optional[int]):
        # run(): gather version/availability info for safetensors, accelerate,
        # torch, TensorFlow and Flax, print it, and return the dict.
        __snake_case: List[str] = "not installed"
        if is_safetensors_available():
            import safetensors

            __snake_case: Tuple = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""") is not None:
            import safetensors

            __snake_case: Tuple = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''

        __snake_case: Union[str, Any] = "not installed"
        __snake_case: Union[str, Any] = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            __snake_case: List[str] = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase__):
                __snake_case: int = load_config_from_file(self._accelerate_config_file).to_dict()

            __snake_case: Union[str, Any] = (
                "\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
                if isinstance(lowerCAmelCase__, lowerCAmelCase__)
                else f'''\t{accelerate_config}'''
            )

        __snake_case: Tuple = "not installed"
        __snake_case: List[str] = "NA"
        if is_torch_available():
            import torch

            __snake_case: List[Any] = torch.__version__
            __snake_case: Tuple = torch.cuda.is_available()

        __snake_case: Optional[Any] = "not installed"
        __snake_case: Optional[int] = "NA"
        if is_tf_available():
            import tensorflow as tf

            __snake_case: List[str] = tf.__version__
            try:
                # deprecated in v2.1
                __snake_case: List[str] = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                __snake_case: Optional[Any] = bool(tf.config.list_physical_devices("""GPU"""))

        __snake_case: Optional[Any] = "not installed"
        __snake_case: Union[str, Any] = "not installed"
        __snake_case: Optional[int] = "not installed"
        __snake_case: Optional[int] = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            __snake_case: int = flax.__version__
            __snake_case: Dict = jax.__version__
            __snake_case: str = jaxlib.__version__
            __snake_case: List[str] = jax.lib.xla_bridge.get_backend().platform

        # The f-strings below reference the original local names that the
        # obfuscated assignments above no longer bind.
        __snake_case: Any = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f'''{safetensors_version}''',
            "Accelerate version": f'''{accelerate_version}''',
            "Accelerate config": f'''{accelerate_config_str}''',
            "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
            "Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''',
            "Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''',
            "Jax version": f'''{jax_version}''',
            "JaxLib version": f'''{jaxlib_version}''',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""")
        print(self.format_dict(lowerCAmelCase__))

        return info

    @staticmethod
    def UpperCAmelCase__(A: str):
        # format_dict: render the info dict as a GitHub-issue-friendly bullet list.
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
111
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): this module has been mechanically obfuscated — assignment
# targets were renamed (to `a__`, `UpperCAmelCase`, ...) and several `def`
# headers repeat one parameter name, which is a SyntaxError as written.
# Annotated tuple-unpacking targets (`a__ , a__ : T = ...`) are also invalid
# syntax. The code is left byte-identical; the comments describe the apparent
# intent only — TODO: restore the original names.

UpperCAmelCase : Any = 16  # presumably MAX_GPU_BATCH_SIZE — referenced by name below
UpperCAmelCase : str = 32  # presumably EVAL_BATCH_SIZE; NOTE(review): rebinds the same obfuscated name


# get_dataloaders(accelerator, batch_size=16): tokenize GLUE MRPC and build
# train/eval DataLoaders. NOTE(review): duplicate parameter name — SyntaxError.
def _A(SCREAMING_SNAKE_CASE: Accelerator, SCREAMING_SNAKE_CASE: int = 16):
    """simple docstring"""
    a__ : int = AutoTokenizer.from_pretrained("bert-base-cased")
    a__ : List[str] = load_dataset("glue", "mrpc")

    def tokenize_function(SCREAMING_SNAKE_CASE: List[Any]):
        # max_length=None => use the model max length (it's actually the default)
        a__ : int = tokenizer(examples["sentence1"], examples["sentence2"], truncation=SCREAMING_SNAKE_CASE, max_length=SCREAMING_SNAKE_CASE)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        a__ : Dict = datasets.map(
            SCREAMING_SNAKE_CASE,
            batched=SCREAMING_SNAKE_CASE,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    a__ : Dict = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(SCREAMING_SNAKE_CASE: str):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        a__ : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            a__ : str = 16
        elif accelerator.mixed_precision != "no":
            a__ : Union[str, Any] = 8
        else:
            a__ : List[str] = None
        return tokenizer.pad(
            SCREAMING_SNAKE_CASE,
            padding="longest",
            max_length=SCREAMING_SNAKE_CASE,
            pad_to_multiple_of=SCREAMING_SNAKE_CASE,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    a__ : Any = DataLoader(tokenized_datasets["train"], shuffle=SCREAMING_SNAKE_CASE, collate_fn=SCREAMING_SNAKE_CASE, batch_size=SCREAMING_SNAKE_CASE)
    a__ : int = DataLoader(tokenized_datasets["validation"], shuffle=SCREAMING_SNAKE_CASE, collate_fn=SCREAMING_SNAKE_CASE, batch_size=SCREAMING_SNAKE_CASE)
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    UpperCAmelCase : str = mocked_dataloaders  # noqa: F811


# training_function(config, args): full train/eval loop with distributed-safe
# metric computation. NOTE(review): duplicate parameter name — SyntaxError.
def _A(SCREAMING_SNAKE_CASE: List[Any], SCREAMING_SNAKE_CASE: str):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", SCREAMING_SNAKE_CASE) == "1":
        a__ : Tuple = 2
    # Initialize accelerator
    a__ : int = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    a__ : Optional[int] = config["lr"]
    a__ : Union[str, Any] = int(config["num_epochs"])
    a__ : Any = int(config["seed"])
    a__ : Dict = int(config["batch_size"])
    a__ : int = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    a__ : int = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        a__ : Dict = batch_size // MAX_GPU_BATCH_SIZE
        a__ : Tuple = MAX_GPU_BATCH_SIZE

    set_seed(SCREAMING_SNAKE_CASE)
    # NOTE(review): annotated tuple-unpacking target — SyntaxError as written.
    a__ , a__ : Optional[int] = get_dataloaders(SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    a__ : List[str] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=SCREAMING_SNAKE_CASE)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    a__ : List[str] = model.to(accelerator.device)

    # Instantiate optimizer
    a__ : List[Any] = AdamW(params=model.parameters(), lr=SCREAMING_SNAKE_CASE)

    # Instantiate scheduler
    a__ : Optional[int] = get_linear_schedule_with_warmup(
        optimizer=SCREAMING_SNAKE_CASE,
        num_warmup_steps=100,
        num_training_steps=(len(SCREAMING_SNAKE_CASE) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    a__ , a__ , a__ , a__ , a__ : Optional[int] = accelerator.prepare(
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE
    )

    # Now we train the model
    for epoch in range(SCREAMING_SNAKE_CASE):
        model.train()
        for step, batch in enumerate(SCREAMING_SNAKE_CASE):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            a__ : Dict = model(**SCREAMING_SNAKE_CASE)
            a__ : List[Any] = outputs.loss
            a__ : List[str] = loss / gradient_accumulation_steps
            accelerator.backward(SCREAMING_SNAKE_CASE)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        a__ : Optional[Any] = 0
        for step, batch in enumerate(SCREAMING_SNAKE_CASE):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                a__ : Any = model(**SCREAMING_SNAKE_CASE)
            a__ : str = outputs.logits.argmax(dim=-1)
            a__ , a__ : List[str] = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(SCREAMING_SNAKE_CASE) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    a__ : Optional[Any] = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    a__ : Dict = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=SCREAMING_SNAKE_CASE,
                references=SCREAMING_SNAKE_CASE,
            )

        a__ : Tuple = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', SCREAMING_SNAKE_CASE)


# main(): parse CLI flags and launch training with fixed hyper-parameters.
def _A():
    """simple docstring"""
    a__ : List[str] = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=SCREAMING_SNAKE_CASE,
        default=SCREAMING_SNAKE_CASE,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    a__ : str = parser.parse_args()
    a__ : Optional[int] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE)


if __name__ == "__main__":
    main()
95
0
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for an ESM model.

    Stores the transformer hyper-parameters plus, for folding (ESMFold) checkpoints,
    a nested `EsmFoldConfig` and an explicit vocabulary list.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # mask/pad token ids are forwarded to the base config so downstream code can rely on them.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding checkpoints need an ESMFold sub-config; fall back to defaults when absent.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested `EsmFoldConfig` if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """ESMFold-specific options attached to a folding `EsmConfig`."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a TrunkConfig instance, a plain dict, or nothing (defaults).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested `TrunkConfig`."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Configuration of the ESMFold trunk (Evoformer-like stack)."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """Fill in a default structure module and validate dimension/head-width consistency."""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Fixed: the original compared each dimension against itself (`x % x`), which can never
        # fail; the intended invariant is divisibility by the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested `StructureModuleConfig`."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (IPA head)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 vocabulary as a tuple of token strings."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
138
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the kwargs used to build a MobileViTImageProcessor for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Fixed: the original declared every parameter with the same mangled name, which is a
        # SyntaxError (duplicate argument) and left the attributes below unbound.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Fixed: every method was named `_lowercase`, so later definitions shadowed earlier ones and
    # unittest discovered no tests; restored conventional, unique `test_*` names.
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
95
0
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowercase ( UpperCamelCase__): def __init__( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[Any] = None , _lowerCamelCase : str = None , _lowerCamelCase : int = False , _lowerCamelCase : str = False , _lowerCamelCase : str = None , **_lowerCamelCase : List[str] , ): """simple docstring""" super().__init__( lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , ) A_ : List[str] = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths} A_ : Dict = Text( cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , **lowerCAmelCase__ , ) def a_ ( self : List[Any] ): """simple docstring""" if self.streaming: A_ : Tuple = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ : Union[str, Any] = None A_ : Optional[int] = None A_ : Union[str, Any] = None A_ : Optional[Any] = None self.builder.download_and_prepare( download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , ) A_ : int = self.builder.as_dataset( split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory ) return dataset
167
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Holds the kwargs used to build a MobileNetVaImageProcessor for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fixed: the original declared every parameter with the same mangled name, which is a
        # SyntaxError (duplicate argument) and left the attributes below unbound.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Fixed: every method was named `_lowercase`, so later definitions shadowed earlier ones and
    # unittest discovered no tests; restored conventional, unique `test_*` names.
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
95
0
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    # Fixed: method bodies referenced a mangled name (`lowerCAmelCase__`) that was never bound
    # (NameError), and every method shared the same name so later ones shadowed earlier ones.

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image (3 channels, 30x400)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
262
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy import structure for the ConvBERT sub-package.
# Fixed: the dict was bound to a mangled name (`UpperCAmelCase`) that was rebound on every
# branch (each assignment clobbering the previous one) while `_LazyModule` referenced the
# undefined `_import_structure`; restored the conventional accumulate-into-one-dict pattern.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
95
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # No sentencepiece: there is no slow tokenizer to convert from.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# Fixed: all of the module constants below were bound to the same mangled name
# (`UpperCamelCase__`), so each assignment clobbered the previous one and the class-level
# references (VOCAB_FILES_NAMES, etc.) raised NameError.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer based on a SentencePiece vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Fixed: the original declared every parameter as `_A` (duplicate argument name ->
        # SyntaxError) and passed mangled names for the AddedToken strip flags.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs with special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_a: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
92
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration class for a Swin2SR image super-resolution model.

    Stores the hyper-parameters consumed by the model; inherits serialization
    behaviour from ``PretrainedConfig``.  The original obfuscated source
    declared duplicate ``__init__`` parameter names (a SyntaxError) and an
    undefined base class; both are restored here.
    """

    model_type = "swin2sr"
    # Maps standard config attribute names onto this model's native names.
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Derived: one transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
95
0
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class UpperCamelCase_(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Emits a ``FutureWarning`` and defers everything to
    ``VideoMAEImageProcessor``.  The obfuscated source passed an undefined
    name as the warning category and inherited from an undefined class;
    both are restored here.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
178
from diffusers.utils.testing_utils import require_onnxruntime


# Empty placeholder test class; the decorator makes the test runner skip it
# when onnxruntime is not installed.
@require_onnxruntime
class __lowerCAmelCase:
    pass
95
0
"""Digit-persistence utilities.

The obfuscated source defined both functions under the same name ``A_`` (so
the first was shadowed) and used the broken check ``isinstance(num, num)``;
both defects are fixed here.
"""


def multiplicative_persistence(num: int) -> int:
    """Return the number of times the digits of ``num`` must be multiplied
    together before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    >>> multiplicative_persistence(0)
    0

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        total = 1
        for digit in num_string:
            total *= int(digit)
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return the number of times the digits of ``num`` must be summed
    before a single digit remains.

    >>> additive_persistence(199)
    3
    >>> additive_persistence(5)
    0

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        total = sum(int(digit) for digit in num_string)
        num_string = str(total)
        steps += 1
    return steps


# Backward-compat alias: under the old duplicate name `A_`, the additive
# function (defined last) was the one that callers actually resolved.
A_ = additive_persistence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
320
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class __lowerCAmelCase(PipelineTool):
    """Agent tool that summarizes an English text with a BART checkpoint.

    The obfuscated source named all three hook methods `_lowercase`, which
    broke the encode/forward/decode contract expected by ``PipelineTool``;
    the real hook names and the boolean flags are restored here.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's max length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Turn generated token ids back into a clean summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
95
0
def UpperCAmelCase(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: repeatedly divides ``k`` by the
    largest remaining factorial to pick the next element.

    The obfuscated source declared both parameters as ``a_`` (a
    SyntaxError) and crashed with IndexError for ``n == 1``; both are
    fixed here.

    Args:
        k: index of the permutation, 0 <= k < n!.
        n: number of elements to permute.

    Returns:
        The k-th permutation of [0, 1, ..., n-1] as a list.
    """
    if n < 1:
        return []
    if n == 1:
        assert k == 0, "k out of bounds"
        return [0]

    # factorials == [1!, 2!, ..., (n-1)!]
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Consume one factorial per position, largest first.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class __lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) Funnel Transformer tokenizer.

    The obfuscated source bound every module constant to the same name and
    declared duplicate `__init__` parameters, breaking all the references
    below; the conventional names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses a dedicated token-type id for the [CLS] token.
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved options disagree with
        # the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence or a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build token-type ids: cls_token_type_id for [CLS], 0 for the first
        sequence (+ its [SEP]), 1 for the second sequence (+ its [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the wordpiece vocabulary files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
95
0
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter prefix -> HF parameter prefix ("*" is a layer index wildcard).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# HF keys that live at the top level (no "unispeech_sat." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the (possibly nested) HF parameter named by `key`.

    Raises ValueError when the fairseq tensor shape does not match the HF
    parameter shape.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every fairseq parameter into the HF model, logging unmatched ones."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor parameter into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Load a fairseq UniSpeechSat checkpoint and save it as an HF model."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): the original script overwrites dict_path with "" here,
    # so the --dict_path argument is effectively ignored — preserved as-is.
    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
169
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class __lowerCAmelCase(AbstractDatasetReader):
    """Dataset reader backed by a Spark DataFrame.

    The obfuscated source declared duplicate ``__init__`` parameters and an
    undefined base class; the conventional names are restored here.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or fully prepared on disk)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-preparation when the cache must be ignored.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
95
0
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Build a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    """`transformers-cli convert`: convert original TF checkpoints to PyTorch.

    The obfuscated source declared duplicate ``__init__`` parameters (a
    SyntaxError), an undefined base class, and raised ImportError with an
    undefined name; all are restored here.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its arguments on `parser`."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the model-specific conversion script.

        Each branch imports lazily so TensorFlow is only required when the
        corresponding conversion is actually requested.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            # A ".ckpt" path is a TF checkpoint; anything else is a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
156
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc.

    The obfuscated source declared both parameters with the same name (a
    SyntaxError) and named the function `_A` while the `__main__` block
    called `arc_length`; both are fixed here.

    Args:
        angle: central angle of the arc in degrees.
        radius: radius of the circle.

    Returns:
        Arc length: 2 * pi * radius * (angle / 360).
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
95
0
"""IPv4 address validation helper.

The obfuscated source named the function `snake_case` while the `__main__`
block called `is_ip_va_address_valid`, and bound the input to names the
f-string never referenced; both are fixed here.
"""


def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string looks like a valid dotted-quad IPv4 address.

    Non-numeric segments are discarded before counting, so any non-digit
    octet makes the check fail.
    """
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # NOTE(review): the original caps octets at 254, which rejects e.g.
    # 255.255.255.255 — preserved as-is; confirm whether 255 should be valid.
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f'{ip} is a {valid_or_invalid} IP v4 address.')
268
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of output labels for each supported GLUE fine-tuning task.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Load a TF XLNet checkpoint and save the weights + config as PyTorch.

    The obfuscated source declared four identically-named parameters (a
    SyntaxError) and named the function `_A` while the `__main__` block
    called `convert_xlnet_checkpoint_to_pytorch`; both are fixed here.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
95
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __snake_case : '''simple docstring''' def __init__( self : Union[str, Any] , A : Tuple , ): __snake_case: Optional[Any] = parent __snake_case: List[Any] = 13 __snake_case: Optional[Any] = 7 __snake_case: List[Any] = 30 __snake_case: Optional[int] = self.seq_length + self.mem_len __snake_case: str = 15 __snake_case: Tuple = True __snake_case: Tuple = True __snake_case: int = 99 __snake_case: Union[str, Any] = [10, 50, 80] __snake_case: Dict = 32 __snake_case: List[Any] = 32 __snake_case: Dict = 4 __snake_case: int = 8 __snake_case: Tuple = 128 __snake_case: Union[str, Any] = 2 __snake_case: Tuple = 2 __snake_case: Any = None __snake_case: List[str] = 1 __snake_case: Optional[Any] = 0 __snake_case: Any = 3 __snake_case: Any = self.vocab_size - 1 __snake_case: int = 0.01 def UpperCAmelCase__ ( self : Tuple ): __snake_case: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case: Optional[int] = None if self.use_labels: __snake_case: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case: Union[str, Any] = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , 
div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCAmelCase__ ( self : List[str] ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCAmelCase__ ( self : Any , A : int , A : str , A : List[Any] , A : int ): __snake_case: List[Any] = TFTransfoXLModel(lowerCAmelCase__ ) __snake_case: Dict = model(lowerCAmelCase__ ).to_tuple() __snake_case: List[Any] = {"input_ids": input_ids_a, "mems": mems_a} __snake_case: str = model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase__ ( self : int , A : List[str] , A : List[Any] , A : Optional[Any] , A : List[str] ): __snake_case: Union[str, Any] = TFTransfoXLLMHeadModel(lowerCAmelCase__ ) __snake_case: Optional[Any] = model(lowerCAmelCase__ ).to_tuple() __snake_case: Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels} __snake_case: Optional[int] = model(lowerCAmelCase__ ).to_tuple() __snake_case: int = model([input_ids_a, mems_a] ).to_tuple() __snake_case: int = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} __snake_case: str = model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCAmelCase__ ( self : int , A : Optional[int] , A : List[str] , A : int , A : Optional[int] ): __snake_case: Optional[Any] = TFTransfoXLForSequenceClassification(lowerCAmelCase__ ) __snake_case: int = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any ): __snake_case: int = self.prepare_config_and_inputs() (__snake_case): List[Any] = config_and_inputs __snake_case: List[str] = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class __snake_case ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCAmelCase__ = () if is_tf_available() else () lowerCAmelCase__ = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : List[str] , A : str , A : List[Any] , A : int , A : List[str] , A : str ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCAmelCase__ ( self : Dict ): __snake_case: Tuple = TFTransfoXLModelTester(self ) __snake_case: List[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , d_embed=37 ) def UpperCAmelCase__ ( self : List[Any] ): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[Any] ): self.model_tester.set_seed() __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase__ ) def UpperCAmelCase__ ( self : str ): self.model_tester.set_seed() __snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase__ ) def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase__ ) def UpperCAmelCase__ ( self : str ): __snake_case: List[str] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case: str = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __snake_case: Dict = model_class(lowerCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __snake_case: Union[str, Any] = model.get_output_embeddings() assert isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) __snake_case: str = model.get_bias() assert name is None else: __snake_case: Dict = model.get_output_embeddings() assert x is None __snake_case: Dict = model.get_bias() assert name is None def UpperCAmelCase__ ( self : Union[str, Any] ): pass @slow def UpperCAmelCase__ ( self : str ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case: str = TFTransfoXLModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not 
returning a single loss.""" ) def UpperCAmelCase__ ( self : Union[str, Any] ): pass @require_tf class __snake_case ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def UpperCAmelCase__ ( self : Tuple ): __snake_case: str = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off __snake_case: Tuple = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off __snake_case: Union[str, Any] = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __snake_case: List[Any] = model.generate(lowerCAmelCase__ , max_length=200 , do_sample=lowerCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
111
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration for a CANINE model.

    Stores the transformer hyper-parameters plus the character-level settings
    (hashing embedding and down/up-sampling) that are specific to CANINE.
    Defaults correspond to the ``google/canine-s`` architecture.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
95
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) Funnel Transformer tokenizer (WordPiece).

    Behaves like a BERT-style fast tokenizer except that Funnel uses token
    type id 2 for the ``<cls>`` token (``cls_token_type_id``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options: if the
        # serialized tokenizer.json disagrees with the constructor arguments,
        # rebuild the normalizer with the caller's settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from sequence(s): ``<cls> A <sep>`` or ``<cls> A <sep> B <sep>``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: ``2`` for <cls>, ``0`` for the first segment, ``1`` for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
138
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device UpperCAmelCase : int = False class __lowerCAmelCase ( unittest.TestCase): pass @nightly @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase): def _lowercase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Any: '''simple docstring''' a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) a__ : int =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) a__ : Optional[Any] =torch.manual_seed(0 ) a__ : Optional[Any] =pipe.dual_guided( prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase__ ) a__ : str =VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) a__ : Optional[Any] =generator.manual_seed(0 ) a__ : Tuple =pipe.dual_guided( prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self ) -> Any: '''simple docstring''' a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase__ ) 
pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) a__ : Optional[Any] ="cyberpunk 2077" a__ : int =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" ) a__ : Union[str, Any] =torch.manual_seed(0 ) a__ : Tuple =pipe.dual_guided( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images a__ : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a__ : Any =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 a__ : str ="A painting of a squirrel eating a burger " a__ : Optional[int] =torch.manual_seed(0 ) a__ : str =pipe.text_to_image( prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images a__ : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a__ : Optional[int] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 a__ : Optional[Any] =pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy" ).images a__ : Union[str, Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a__ : Any =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
95
0
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _lowerCamelCase : List[Any] = logging.get_logger(__name__) class lowercase ( UpperCamelCase__): __lowerCAmelCase : Optional[Any] = ["""input_features"""] def __init__( self : Union[str, Any] , _lowerCamelCase : int=80 , _lowerCamelCase : List[Any]=1_60_00 , _lowerCamelCase : Optional[Any]=1_60 , _lowerCamelCase : str=30 , _lowerCamelCase : str=4_00 , _lowerCamelCase : int=0.0 , _lowerCamelCase : Any=False , **_lowerCamelCase : List[str] , ): """simple docstring""" super().__init__( feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , ) A_ : Any = n_fft A_ : List[Any] = hop_length A_ : Any = chunk_length A_ : List[Any] = chunk_length * sampling_rate A_ : Tuple = self.n_samples // hop_length A_ : str = sampling_rate A_ : Dict = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase__ , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowerCAmelCase__ , norm='''slaney''' , mel_scale='''slaney''' , ) def a_ ( self : Any , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = spectrogram( lowerCAmelCase__ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) A_ : List[Any] = log_spec[:, :-1] A_ : Optional[int] = np.maximum(lowerCAmelCase__ , log_spec.max() - 8.0 ) A_ : Optional[Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( 
_lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict = 0.0 ): """simple docstring""" if attention_mask is not None: A_ : Union[str, Any] = np.array(lowerCAmelCase__ , np.intaa ) A_ : str = [] for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ): A_ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: A_ : Union[str, Any] = padding_value normed_input_values.append(lowerCAmelCase__ ) else: A_ : Tuple = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] = True , _lowerCamelCase : List[str] = None , _lowerCamelCase : Tuple = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[Any] = "max_length" , _lowerCamelCase : int = None , _lowerCamelCase : Any = None , _lowerCamelCase : Any = None , **_lowerCamelCase : Union[str, Any] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A_ : Union[str, Any] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) A_ : List[Any] = is_batched_numpy or ( isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A_ : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ): A_ : Union[str, Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa ) elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Union[str, Any] = [np.asarray([raw_speech] ).T] A_ : Optional[Any] = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding A_ : List[str] = self.pad( lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=max_length if max_length else self.n_samples , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: A_ : List[str] = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) A_ : Tuple = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format A_ : int = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) A_ : Optional[Any] = [self._np_extract_fbank_features(lowerCAmelCase__ ) for waveform in input_features[0]] if isinstance(input_features[0] , lowerCAmelCase__ ): A_ : Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in 
input_features] else: A_ : int = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) A_ : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: A_ : Optional[int] = padded_inputs.convert_to_tensors(lowerCAmelCase__ ) return padded_inputs def a_ ( self : List[Any] ): """simple docstring""" A_ : List[Any] = copy.deepcopy(self.__dict__ ) A_ : Optional[int] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
167
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Abstract interface for streamers that consume tokens produced by `generate()`.

    NOTE(review): the obfuscated original named all three classes identically and
    all methods `_lowercase` (shadowing each other) and duplicated parameter
    names (a SyntaxError); names here are reconstructed from the bodies' own
    call sites (`self.put` semantics, `self.on_finalized_text`, `self._is_chinese_char`).
    """

    def put(self, value):
        """Receive a new batch of generated token ids and route them onward."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Decode incoming token ids incrementally and print text as it becomes final.

    Args:
        tokenizer: tokenizer used to decode the cached token ids.
        skip_prompt: if True, the first `put()` call (the prompt) is dropped.
        **decode_kwargs: forwarded to ``tokenizer.decode``.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value) -> None:
        """Cache new token ids, decode, and emit only the newly finalized text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self) -> None:
        """Flush any remaining cached tokens and signal the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
        """Default sink: print finalized text to stdout (newline only at stream end)."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp: int) -> bool:
        """Return True if *cp* (a Unicode code point) lies in a CJK ideograph block."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """TextStreamer variant that pushes finalized text onto a queue and is iterable.

    ``stop_signal`` (None) marks end of stream; ``timeout`` bounds queue operations.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
        """Enqueue finalized text; enqueue the stop signal when the stream ends."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self) -> str:
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
95
0
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]: return (data["data"], data["target"]) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]: lowerCAmelCase_ : Tuple = XGBClassifier() classifier.fit(lowerCAmelCase_ , lowerCAmelCase_ ) return classifier def lowerCAmelCase ( )-> Optional[Any]: lowerCAmelCase_ : Optional[Any] = load_iris() lowerCAmelCase_ : Union[str, Any] = data_handling(lowerCAmelCase_ ) lowerCAmelCase_ : int = train_test_split( lowerCAmelCase_ , lowerCAmelCase_ , test_size=0.25 ) lowerCAmelCase_ : List[str] = iris["target_names"] # Create an XGBoost Classifier from the training data lowerCAmelCase_ : Union[str, Any] = xgboost(lowerCAmelCase_ , lowerCAmelCase_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , display_labels=lowerCAmelCase_ , cmap='''Blues''' , normalize='''true''' , ) plt.title('''Normalized Confusion Matrix - IRIS Dataset''' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
262
def _A ( SCREAMING_SNAKE_CASE : int = 50 ): """simple docstring""" a__ : Any =[1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
95
0
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer backed by a JSON char->id vocabulary.

    NOTE(review): reconstructed — the obfuscated original had duplicate
    parameter names (a SyntaxError) and never assigned ``self.vocab`` /
    ``self.decoder`` that its own methods read.

    Args:
        vocab_file: path to the JSON vocabulary.
        unk_token / bos_token / eos_token / pad_token: special tokens
            (defaults taken from the original signature).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (added tokens excluded)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full token->id map including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        # Fall back to the unk token's id for out-of-vocabulary characters.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into *save_directory*; return the file path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            # sort_keys/ensure_ascii reconstructed per upstream convention — TODO confirm
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
92
from __future__ import annotations def _A ( SCREAMING_SNAKE_CASE : list ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return [] a__ , a__ : int =min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE ) a__ : Optional[int] =int(max_value - min_value ) + 1 a__ : list[list] =[[] for _ in range(SCREAMING_SNAKE_CASE )] for i in my_list: buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE ) return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
95
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    """SentencePiece-based MBART tokenizer with fairseq language-code handling.

    NOTE(review): reconstructed — the obfuscated original had duplicate
    parameter names (a SyntaxError) and assigned every attribute to a
    throwaway local (``snake_case_``); attribute names are recovered from the
    ``self.*`` reads in the methods below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; ship its serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark special-token positions (1) around one or two plain sequences (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Encode *raw_inputs* for translation and force the target language BOS."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
178
import numpy as np def _A ( SCREAMING_SNAKE_CASE : np.array ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
95
0
"""simple docstring""" import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging __snake_case = { """cola""": 2, """mnli""": 3, """mrpc""": 2, """sst-2""": 2, """sts-b""": 1, """qqp""": 2, """qnli""": 2, """rte""": 2, """wnli""": 2, } logging.set_verbosity_info() def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Dict, _lowerCAmelCase : Union[str, Any]=None ): """simple docstring""" _a = XLNetConfig.from_json_file(_lowerCAmelCase ) _a = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' ) _a = finetuning_task _a = GLUE_TASKS_NUM_LABELS[finetuning_task] _a = XLNetForSequenceClassification(_lowerCAmelCase ) elif "squad" in finetuning_task: _a = finetuning_task _a = XLNetForQuestionAnswering(_lowerCAmelCase ) else: _a = XLNetLMHeadModel(_lowerCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) # Save pytorch-model _a = os.path.join(_lowerCAmelCase, _lowerCAmelCase ) _a = os.path.join(_lowerCAmelCase, _lowerCAmelCase ) print(f'Save PyTorch model to {os.path.abspath(_lowerCAmelCase )}' ) torch.save(model.state_dict(), _lowerCAmelCase ) print(f'Save configuration file to {os.path.abspath(_lowerCAmelCase )}' ) with open(_lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--xlnet_config_file''', default=None, type=str, required=True, help=( '''The config 
json file corresponding to the pre-trained XLNet model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--finetuning_task''', default=None, type=str, help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''', ) __snake_case = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
320
import numpy # List of input, output pairs UpperCAmelCase : str = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCAmelCase : Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150)) UpperCAmelCase : str = [2, 4, 1, 5] UpperCAmelCase : List[str] = len(train_data) UpperCAmelCase : Dict = 0.0_0_9 def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="train" ): """simple docstring""" return calculate_hypothesis_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - output( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def _A ( SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" a__ : Tuple =0 for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=m ): """simple docstring""" a__ : Any =0 for i in range(SCREAMING_SNAKE_CASE ): if index == -1: summation_value += _error(SCREAMING_SNAKE_CASE ) else: summation_value += _error(SCREAMING_SNAKE_CASE ) * train_data[i][0][index] return summation_value def _A ( SCREAMING_SNAKE_CASE : int ): """simple docstring""" a__ : Any =summation_of_cost_derivative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / m return cost_derivative_value def _A ( ): """simple docstring""" global parameter_vector # Tune these values to set a tolerance value for predicted output a__ : Dict =0.0_0_0_0_0_2 a__ : Union[str, Any] =0 a__ : 
Any =0 while True: j += 1 a__ : Any =[0, 0, 0, 0] for i in range(0 , len(SCREAMING_SNAKE_CASE ) ): a__ : Tuple =get_cost_derivative(i - 1 ) a__ : List[Any] =( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE , rtol=SCREAMING_SNAKE_CASE , ): break a__ : Optional[Any] =temp_parameter_vector print(("Number of iterations:", j) ) def _A ( ): """simple docstring""" for i in range(len(SCREAMING_SNAKE_CASE ) ): print(("Actual output value:", output(SCREAMING_SNAKE_CASE , "test" )) ) print(("Hypothesis output:", calculate_hypothesis_value(SCREAMING_SNAKE_CASE , "test" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
95
0
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () lowercase__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). lowercase__ = [0, 25, 50] lowercase__ = [25, 50, 75] lowercase__ = fuzz.membership.trimf(X, abca) lowercase__ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. lowercase__ = np.ones(75) lowercase__ = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) lowercase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) lowercase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) lowercase__ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) lowercase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] lowercase__ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) lowercase__ = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] lowercase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] lowercase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). 
from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
96
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase__ = 16 lowercase__ = 32 def _snake_case ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ): _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ ) _lowerCamelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowercase__ ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCamelCase : int = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
_lowerCamelCase : List[str] = DataLoader( tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) _lowerCamelCase : int = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ , lowercase__ ): # Initialize accelerator _lowerCamelCase : Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : Optional[int] = config['lr'] _lowerCamelCase : Optional[int] = int(config['num_epochs'] ) _lowerCamelCase : Union[str, Any] = int(config['seed'] ) _lowerCamelCase : Optional[int] = int(config['batch_size'] ) _lowerCamelCase : Dict = args.model_name_or_path set_seed(lowercase__ ) _lowerCamelCase, _lowerCamelCase : Optional[int] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer _lowerCamelCase : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCamelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: _lowerCamelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[Any] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCamelCase : Tuple = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , 
num_training_steps=lowercase__ , ) else: _lowerCamelCase : Any = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over _lowerCamelCase : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCamelCase : Dict = 0 # Now we train the model _lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : str = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): _lowerCamelCase : List[Any] = model(**lowercase__ ) _lowerCamelCase : int = outputs.loss _lowerCamelCase : Dict = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _lowerCamelCase : Union[str, Any] = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**lowercase__ ) _lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCamelCase, _lowerCamelCase : List[str] = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: _lowerCamelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCamelCase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) _lowerCamelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowercase__ ) _lowerCamelCase : Tuple = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: _lowerCamelCase : str = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ): _lowerCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=lowercase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase__ , ) parser.add_argument( '--output_dir' , type=lowercase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' 
, ) parser.add_argument( '--performance_lower_bound' , type=lowercase__ , default=lowercase__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=lowercase__ , default=3 , help='Number of train epochs.' , ) _lowerCamelCase : Optional[Any] = parser.parse_args() _lowerCamelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
96
1
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowercase__ = 5_0000 lowercase__ = 5000 lowercase__ , lowercase__ = os.path.split(__file__) lowercase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def _snake_case ( lowercase__ , lowercase__ ): for i in range(lowercase__ ): _lowerCamelCase : Optional[Any] = dataset[i] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): for i in range(0 , len(lowercase__ ) , lowercase__ ): _lowerCamelCase : str = dataset[i : i + batch_size] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): with dataset.formatted_as(type=lowercase__ ): for i in range(lowercase__ ): _lowerCamelCase : Optional[Any] = dataset[i] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): with dataset.formatted_as(type=lowercase__ ): for i in range(0 , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = dataset[i : i + batch_size] def _snake_case ( ): _lowerCamelCase : List[str] = {'num examples': SPEED_TEST_N_EXAMPLES} _lowerCamelCase : str = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] _lowerCamelCase : Dict = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), 
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) _lowerCamelCase : List[Any] = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) _lowerCamelCase : List[Any] = generate_example_dataset( os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(lowercase__ ) ) _lowerCamelCase : int = func(lowercase__ , **lowercase__ ) print('shuffling dataset' ) _lowerCamelCase : Dict = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(lowercase__ ) ) _lowerCamelCase : List[str] = func( lowercase__ , **lowercase__ ) with open(lowercase__ , 'wb' ) as f: f.write(json.dumps(lowercase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
96
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """new-model""" if is_tf_available(): class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): 
_lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): _lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = 
TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow @require_tensorflow_probability def A_ ( self ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : List[Any] = 
TFAutoModelForTableQuestionAnswering.from_pretrained( lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = copy.deepcopy(model.config ) _lowerCamelCase : Dict = ['FunnelBaseModel'] _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): try: AutoConfig.register('new-model' , lowercase ) _lowerCamelCase : Tuple = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) auto_class.register(lowercase , lowercase ) # Trying to register something existing in the Transformers library will raise an error with 
self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config() _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() ) _lowerCamelCase : int = auto_class.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def A_ ( self ): with self.assertRaisesRegex( lowercase , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def A_ ( self ): with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def A_ ( self ): # Make sure we have cached the model. 
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
96
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = field(default="""text-classification""", metadata={"""include_in_asdict_even_if_is_default""": True} ) lowerCamelCase__ = Features({"""text""": Value("""string""" )} ) lowerCamelCase__ = Features({"""labels""": ClassLabel} ) lowerCamelCase__ = "text" lowerCamelCase__ = "labels" def A_ ( self , lowercase ): if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , lowercase ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _lowerCamelCase : Optional[Any] = copy.deepcopy(self ) _lowerCamelCase : List[str] = self.label_schema.copy() _lowerCamelCase : Optional[int] = features[self.label_column] _lowerCamelCase : Optional[int] = label_schema return task_template @property def A_ ( self ): return { self.text_column: "text", self.label_column: "labels", }
96
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
1
"""simple docstring""" class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Any = None _lowerCamelCase : Dict = None _lowerCamelCase : List[str] = graph self._normalize_graph(lowercase , lowercase ) _lowerCamelCase : Optional[Any] = len(lowercase ) _lowerCamelCase : Optional[Any] = None def A_ ( self , lowercase , lowercase ): if sources is int: _lowerCamelCase : List[str] = [sources] if sinks is int: _lowerCamelCase : Dict = [sinks] if len(lowercase ) == 0 or len(lowercase ) == 0: return _lowerCamelCase : Union[str, Any] = sources[0] _lowerCamelCase : Tuple = sinks[0] # make fake vertex if there are more # than one source or sink if len(lowercase ) > 1 or len(lowercase ) > 1: _lowerCamelCase : Tuple = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _lowerCamelCase : Tuple = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _lowerCamelCase : List[Any] = max_input_flow _lowerCamelCase : Any = 0 _lowerCamelCase : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _lowerCamelCase : int = max_input_flow _lowerCamelCase : List[str] = size - 1 def A_ ( self ): if self.maximum_flow_algorithm is None: raise Exception('You need to set maximum flow algorithm before.' 
) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def A_ ( self , lowercase ): _lowerCamelCase : List[Any] = algorithm(self ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : Any = flow_network _lowerCamelCase : Optional[int] = flow_network.verticesCount _lowerCamelCase : Tuple = flow_network.sourceIndex _lowerCamelCase : int = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _lowerCamelCase : Dict = flow_network.graph _lowerCamelCase : Tuple = False def A_ ( self ): if not self.executed: self._algorithm() _lowerCamelCase : Any = True def A_ ( self ): pass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase ): super().__init__(lowercase ) # use this to save your result _lowerCamelCase : str = -1 def A_ ( self ): if not self.executed: raise Exception('You should execute algorithm before using its result!' 
) return self.maximum_flow class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase ): super().__init__(lowercase ) _lowerCamelCase : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )] _lowerCamelCase : List[str] = [0] * self.verticies_count _lowerCamelCase : int = [0] * self.verticies_count def A_ ( self ): _lowerCamelCase : List[str] = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _lowerCamelCase : int = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _lowerCamelCase : int = 0 while i < len(lowercase ): _lowerCamelCase : Any = vertices_list[i] _lowerCamelCase : Union[str, Any] = self.heights[vertex_index] self.process_vertex(lowercase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(lowercase ) ) _lowerCamelCase : List[Any] = 0 else: i += 1 _lowerCamelCase : Any = sum(self.preflow[self.source_index] ) def A_ ( self , lowercase ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(lowercase , lowercase ) self.relabel(lowercase ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Tuple = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= 
preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def A_ ( self , lowercase ): _lowerCamelCase : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _lowerCamelCase : Any = self.heights[to_index] if min_height is not None: _lowerCamelCase : Optional[Any] = min_height + 1 if __name__ == "__main__": lowercase__ = [0] lowercase__ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase__ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase__ = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
96
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{sampling_rate}''' _lowerCamelCase : str = '1' _lowerCamelCase : str = 'f32le' _lowerCamelCase : Union[str, Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _lowerCamelCase : List[Any] = output_stream[0] _lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): _lowerCamelCase : Optional[Any] = f'''{sampling_rate}''' _lowerCamelCase : List[str] = '1' if format_for_conversion == "s16le": _lowerCamelCase : List[str] = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) _lowerCamelCase : Dict = platform.system() if system == "Linux": _lowerCamelCase : Optional[int] = 'alsa' _lowerCamelCase : Optional[Any] = 'default' elif system == "Darwin": _lowerCamelCase : Optional[int] = 'avfoundation' _lowerCamelCase : Any = ':0' elif system == "Windows": _lowerCamelCase : Tuple = 'dshow' _lowerCamelCase : Tuple = 'default' _lowerCamelCase : Optional[int] = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: _lowerCamelCase : int = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": _lowerCamelCase : List[str] = np.intaa _lowerCamelCase : str = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Any = np.floataa _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: _lowerCamelCase : Union[str, Any] = chunk_length_s / 6 _lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): _lowerCamelCase : Any = [stride_length_s, stride_length_s] _lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : List[Any] = datetime.datetime.now() _lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ ) _lowerCamelCase : int = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _lowerCamelCase : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): _lowerCamelCase : int = B'' _lowerCamelCase, _lowerCamelCase : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) _lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: _lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride} if stream: _lowerCamelCase : List[Any] = False yield item _lowerCamelCase : Optional[Any] = stride_left _lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: _lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _lowerCamelCase : Tuple = False yield item def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: _lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
96
1
"""simple docstring""" import logging from transformers import PretrainedConfig lowercase__ = logging.getLogger(__name__) lowercase__ = { """bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """bertabs""" def __init__( self , lowercase=30522 , lowercase=512 , lowercase=6 , lowercase=512 , lowercase=8 , lowercase=512 , lowercase=0.2 , lowercase=6 , lowercase=768 , lowercase=8 , lowercase=2048 , lowercase=0.2 , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Any = vocab_size _lowerCamelCase : List[str] = max_pos _lowerCamelCase : List[str] = enc_layers _lowerCamelCase : Optional[int] = enc_hidden_size _lowerCamelCase : List[str] = enc_heads _lowerCamelCase : Optional[Any] = enc_ff_size _lowerCamelCase : Any = enc_dropout _lowerCamelCase : List[Any] = dec_layers _lowerCamelCase : List[str] = dec_hidden_size _lowerCamelCase : Union[str, Any] = dec_heads _lowerCamelCase : List[Any] = dec_ff_size _lowerCamelCase : List[str] = dec_dropout
96
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""} class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """ctrl""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : Dict = n_positions _lowerCamelCase : Optional[int] = n_embd _lowerCamelCase : str = n_layer _lowerCamelCase : Union[str, Any] = n_head _lowerCamelCase : Any = dff _lowerCamelCase : int = resid_pdrop _lowerCamelCase : Dict = embd_pdrop _lowerCamelCase : Union[str, Any] = layer_norm_epsilon _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : str = use_cache super().__init__(**lowercase )
96
1
"""simple docstring""" import csv import tweepy # Twitter API credentials lowercase__ = """""" lowercase__ = """""" lowercase__ = """""" lowercase__ = """""" def _snake_case ( lowercase__ ): # authorize twitter, initialize tweepy _lowerCamelCase : List[Any] = tweepy.OAuthHandler(lowercase__ , lowercase__ ) auth.set_access_token(lowercase__ , lowercase__ ) _lowerCamelCase : Tuple = tweepy.API(lowercase__ ) # initialize a list to hold all the tweepy Tweets _lowerCamelCase : Any = [] # make initial request for most recent tweets (200 is the maximum allowed count) _lowerCamelCase : Any = api.user_timeline(screen_name=lowercase__ , count=200 ) # save most recent tweets alltweets.extend(lowercase__ ) # save the id of the oldest tweet less one _lowerCamelCase : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase__ ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _lowerCamelCase : List[str] = api.user_timeline( screen_name=lowercase__ , count=200 , max_id=lowercase__ ) # save most recent tweets alltweets.extend(lowercase__ ) # update the id of the oldest tweet less one _lowerCamelCase : Tuple = alltweets[-1].id - 1 print(f'''...{len(lowercase__ )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _lowerCamelCase : Tuple = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , 'w' ) as f: _lowerCamelCase : List[str] = csv.writer(lowercase__ ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(lowercase__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
96
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : Any = data _lowerCamelCase : Node | None = None class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : str = None _lowerCamelCase : str = None def __iter__( self ): _lowerCamelCase : List[str] = self.head while self.head: yield node.data _lowerCamelCase : Optional[int] = node.next if node == self.head: break def __len__( self ): return sum(1 for _ in self ) def __repr__( self ): return "->".join(str(lowercase ) for item in iter(self ) ) def A_ ( self , lowercase ): self.insert_nth(len(self ) , lowercase ) def A_ ( self , lowercase ): self.insert_nth(0 , lowercase ) def A_ ( self , lowercase , lowercase ): if index < 0 or index > len(self ): raise IndexError('list index out of range.' ) _lowerCamelCase : List[Any] = Node(lowercase ) if self.head is None: _lowerCamelCase : str = new_node # first node points itself _lowerCamelCase : Union[str, Any] = new_node elif index == 0: # insert at head _lowerCamelCase : List[str] = self.head _lowerCamelCase : str = new_node else: _lowerCamelCase : Union[str, Any] = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : Union[str, Any] = temp.next _lowerCamelCase : List[str] = new_node if index == len(self ) - 1: # insert at tail _lowerCamelCase : Any = new_node def A_ ( self ): return self.delete_nth(0 ) def A_ ( self ): return self.delete_nth(len(self ) - 1 ) def A_ ( self , lowercase = 0 ): if not 0 <= index < len(self ): raise IndexError('list index out of range.' 
) _lowerCamelCase : Any = self.head if self.head == self.tail: # just one node _lowerCamelCase : List[str] = None elif index == 0: # delete head node _lowerCamelCase : List[str] = self.tail.next.next _lowerCamelCase : Optional[int] = self.head.next else: _lowerCamelCase : Dict = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : int = temp.next _lowerCamelCase : Optional[int] = temp.next.next if index == len(self ) - 1: # delete at tail _lowerCamelCase : List[Any] = temp return delete_node.data def A_ ( self ): return len(self ) == 0 def _snake_case ( ): _lowerCamelCase : Union[str, Any] = CircularLinkedList() assert len(lowercase__ ) == 0 assert circular_linked_list.is_empty() is True assert str(lowercase__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(lowercase__ ) == i circular_linked_list.insert_nth(lowercase__ , i + 1 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(lowercase__ ) == 
"->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class lowerCAmelCase__ : '''simple docstring''' @staticmethod def A_ ( *lowercase , **lowercase ): pass def _snake_case ( lowercase__ ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. lowercase__ = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = pipeline( 'document-question-answering' , model=lowercase , tokenizer=lowercase , image_processor=lowercase ) _lowerCamelCase : Dict = INVOICE_URL _lowerCamelCase : List[str] = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) ) _lowerCamelCase : Optional[Any] = 'What is the placebo?' 
_lowerCamelCase : List[Any] = [ { 'image': load_image(lowercase ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def A_ ( self , lowercase , lowercase ): _lowerCamelCase : str = dqa_pipeline(lowercase , top_k=2 ) self.assertEqual( lowercase , [ [ {'score': ANY(lowercase ), 'answer': ANY(lowercase ), 'start': ANY(lowercase ), 'end': ANY(lowercase )}, {'score': ANY(lowercase ), 'answer': ANY(lowercase ), 'start': ANY(lowercase ), 'end': ANY(lowercase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def A_ ( self ): _lowerCamelCase : List[Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) _lowerCamelCase : Optional[Any] = INVOICE_URL _lowerCamelCase : List[Any] = 'How many cats are there?' _lowerCamelCase : Optional[int] = [ {'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39}, {'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40}, ] _lowerCamelCase : str = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual(nested_simplify(lowercase , decimals=4 ) , lowercase ) _lowerCamelCase : int = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(lowercase , decimals=4 ) , lowercase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably _lowerCamelCase : List[str] = './tests/fixtures/tests_samples/COCO/000000039769.png' _lowerCamelCase : List[str] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual(lowercase , [] ) # We can optionnally pass directly the words and bounding boxes _lowerCamelCase : Optional[Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' _lowerCamelCase : Dict = [] _lowerCamelCase : int = [] _lowerCamelCase : Any = dqa_pipeline(image=lowercase , question=lowercase , words=lowercase , boxes=lowercase , top_k=2 ) self.assertEqual(lowercase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def A_ ( self ): _lowerCamelCase : Optional[int] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) _lowerCamelCase : Union[str, Any] = INVOICE_URL _lowerCamelCase : str = 'What is the invoice number?' _lowerCamelCase : Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) _lowerCamelCase : Optional[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) _lowerCamelCase : Optional[int] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def A_ ( self ): _lowerCamelCase : Optional[Any] = pipeline( 
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , ) _lowerCamelCase : Optional[int] = INVOICE_URL _lowerCamelCase : List[Any] = 'What is the invoice number?' _lowerCamelCase : Tuple = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) _lowerCamelCase : Dict = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) _lowerCamelCase : List[Any] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def A_ ( self ): _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase ) _lowerCamelCase : Tuple = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , ) _lowerCamelCase : Optional[Any] = INVOICE_URL _lowerCamelCase : Tuple = 'What is the invoice number?' 
_lowerCamelCase : List[Any] = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) _lowerCamelCase : Optional[int] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) _lowerCamelCase : int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23}, ] ] * 2 , ) _lowerCamelCase : List[Any] = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) ) # This model should also work if `image` is set to None _lowerCamelCase : Optional[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def A_ ( self ): _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowercase ) _lowerCamelCase : Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowercase , revision='3dc6de3' , max_seq_len=50 , ) _lowerCamelCase : Any = INVOICE_URL _lowerCamelCase : Tuple = 'What is the invoice number?' 
_lowerCamelCase : Any = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) _lowerCamelCase : Any = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [ {'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) _lowerCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(lowercase ) , lowercase , '' ) ) ) # This model should also work if `image` is set to None _lowerCamelCase : Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ {'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) @slow @require_torch def A_ ( self ): _lowerCamelCase : Optional[Any] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) _lowerCamelCase : List[Any] = INVOICE_URL _lowerCamelCase : Any = 'What is the invoice number?' _lowerCamelCase : Dict = dqa_pipeline(image=lowercase , question=lowercase , top_k=2 ) self.assertEqual(nested_simplify(lowercase , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def A_ ( self ): pass
96
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowercase__ = get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = """dummy_data""" lowerCamelCase__ = """datasets""" lowerCamelCase__ = False def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ): _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Dict = dataset_name _lowerCamelCase : Union[str, Any] = cache_dir _lowerCamelCase : Dict = use_local_dummy_data _lowerCamelCase : Tuple = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : str = str(lowercase ) # to be downloaded _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : int = None @property def A_ ( self ): if self._dummy_file is None: _lowerCamelCase : Tuple = self.download_dummy_data() return self._dummy_file @property def A_ ( self ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def A_ ( self ): return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def A_ ( self ): _lowerCamelCase : List[str] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : int = cached_path( lowercase , 
cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase ) return os.path.join(lowercase , self.dummy_file_name ) @property def A_ ( self ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def A_ ( self ): if self._bucket_url is None: _lowerCamelCase : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def A_ ( self ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def A_ ( self , lowercase , *lowercase ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Union[str, Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase , lowercase ): return self.create_dummy_data_dict(lowercase , lowercase ) elif isinstance(lowercase , (list, tuple) ): return self.create_dummy_data_list(lowercase , lowercase ) else: return self.create_dummy_data_single(lowercase , lowercase ) def A_ ( self , lowercase , *lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , *lowercase , **lowercase ): return path def A_ ( self ): return {} def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[int] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase , lowercase ): for single_url in single_urls: download_callback(lowercase ) else: _lowerCamelCase : List[Any] = single_urls download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url 
path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase , lowercase ): _lowerCamelCase : List[Any] = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls] else: _lowerCamelCase : Optional[int] = single_urls _lowerCamelCase : List[Any] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) _lowerCamelCase : int = value # make sure that values are unique if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase ) ) for url in data_url ) _lowerCamelCase : int = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : List[str] = [data_url[0]] * len(lowercase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : str = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(lowercase ) return dummy_data_list def A_ ( self , lowercase , lowercase ): for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus _lowerCamelCase : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(lowercase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def A_ ( self ): pass def A_ ( self ): pass def A_ ( self , lowercase ): def _iter_archive_members(lowercase ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : str = Path(self.dummy_file ).parent _lowerCamelCase : Union[str, Any] = path.relative_to(lowercase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : List[str] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase ) _lowerCamelCase : Optional[int] = Path(lowercase ) _lowerCamelCase : Dict = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(lowercase ).as_posix(), file_path.open('rb' ) def A_ ( self , lowercase ): if not isinstance(lowercase , lowercase ): _lowerCamelCase : List[str] = [paths] for path in paths: if os.path.isfile(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(lowercase ): if filename.startswith(('.', '__') ): continue yield os.path.join(lowercase , lowercase )
96
1
# NOTE(review): collapsed/obfuscated copy of a Flax UNet-2D conditional model
# (diffusers style). Identifiers are mangled ("lowerCAmelCase__", "A_",
# "lowercase"), and statements are split across physical lines, so the code
# below is kept byte-for-byte and only documented here.
#
# Structure, as visible in the code:
#   - imports: flax/jax plus project-local config, embedding and block modules.
#   - first @flax.struct.dataclass class: an output container with one field
#     (presumably the predicted sample array — confirm against upstream).
#   - second class (nn.Module + two mixins): the UNet itself.
#       * class-level fields: sample size, in/out channels, down/up block type
#         tuples, block_out_channels (3_20, 6_40, 12_80, 12_80), layers per
#         block, attention head config, dropout, dtype, etc.
#       * first A_(self, lowercase): builds zero sample / timestep /
#         encoder-hidden-state tensors and calls self.init to produce params
#         (an init_weights-style helper; returns init(...)["params"]).
#       * second A_(self): setup — rejects a non-None num_attention_heads
#         (naming-compatibility issue per the inline upstream comment), builds
#         conv_in, FlaxTimesteps + FlaxTimestepEmbedding, then the down blocks,
#         the mid cross-attention block, the up blocks (over reversed channel
#         lists), and conv_norm_out/conv_out.
#       * __call__: normalizes `timesteps` to a 1-D array, embeds it,
#         transposes the sample NCHW->NHWC, runs conv_in, the down blocks
#         (collecting residuals, optionally adding per-block additional
#         residuals), the mid block (optionally adding a mid residual), the up
#         blocks (consuming residual slices of layers_per_block+1), then
#         GroupNorm + silu + conv_out and transposes back NHWC->NCHW.
#         Returns a tuple when return_dict is falsy, else the dataclass output.
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = 42 @flax_register_to_config class lowerCAmelCase__ ( nn.Module, lowercase, lowercase ): '''simple docstring''' lowerCamelCase__ = 32 lowerCamelCase__ = 4 lowerCamelCase__ = 4 lowerCamelCase__ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) lowerCamelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") lowerCamelCase__ = False lowerCamelCase__ = (3_20, 6_40, 12_80, 12_80) lowerCamelCase__ = 2 lowerCamelCase__ = 8 lowerCamelCase__ = None lowerCamelCase__ = 12_80 lowerCamelCase__ = 0.0 lowerCamelCase__ = False lowerCamelCase__ = jnp.floataa lowerCamelCase__ = True lowerCamelCase__ = 0 lowerCamelCase__ = False def A_ ( self , lowercase ): # init input tensors _lowerCamelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size) _lowerCamelCase : Tuple = jnp.zeros(lowercase , dtype=jnp.floataa ) _lowerCamelCase : str = jnp.ones((1,) , dtype=jnp.intaa ) _lowerCamelCase : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _lowerCamelCase, _lowerCamelCase : Dict = jax.random.split(lowercase ) _lowerCamelCase : Union[str, Any] = {'params': params_rng, 'dropout': dropout_rng} return self.init(lowercase , lowercase , lowercase , lowercase )["params"] def A_ ( self ): _lowerCamelCase : Any = 
self.block_out_channels _lowerCamelCase : int = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_lowerCamelCase : str = self.num_attention_heads or self.attention_head_dim # input _lowerCamelCase : Optional[int] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _lowerCamelCase : List[str] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _lowerCamelCase : str = FlaxTimestepEmbedding(lowercase , dtype=self.dtype ) _lowerCamelCase : Optional[Any] = self.only_cross_attention if isinstance(lowercase , lowercase ): _lowerCamelCase : str = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase , lowercase ): _lowerCamelCase : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types ) # down _lowerCamelCase : int = [] _lowerCamelCase : Dict = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _lowerCamelCase : Dict = output_channel _lowerCamelCase : List[str] = block_out_channels[i] _lowerCamelCase : Union[str, Any] = i == len(lowercase ) - 1 if down_block_type == "CrossAttnDownBlock2D": _lowerCamelCase : str = FlaxCrossAttnDownBlockaD( in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _lowerCamelCase : int = FlaxDownBlockaD( in_channels=lowercase , out_channels=lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase ) _lowerCamelCase : int = down_blocks # mid _lowerCamelCase : List[str] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , 
use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _lowerCamelCase : str = [] _lowerCamelCase : List[str] = list(reversed(lowercase ) ) _lowerCamelCase : Any = list(reversed(lowercase ) ) _lowerCamelCase : List[Any] = list(reversed(lowercase ) ) _lowerCamelCase : Any = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _lowerCamelCase : Optional[Any] = output_channel _lowerCamelCase : Optional[Any] = reversed_block_out_channels[i] _lowerCamelCase : Optional[int] = reversed_block_out_channels[min(i + 1 , len(lowercase ) - 1 )] _lowerCamelCase : str = i == len(lowercase ) - 1 if up_block_type == "CrossAttnUpBlock2D": _lowerCamelCase : Any = FlaxCrossAttnUpBlockaD( in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _lowerCamelCase : int = FlaxUpBlockaD( in_channels=lowercase , out_channels=lowercase , prev_output_channel=lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowercase ) _lowerCamelCase : Dict = output_channel _lowerCamelCase : int = up_blocks # out _lowerCamelCase : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) _lowerCamelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase = True , lowercase = False , ): # 1. 
time if not isinstance(lowercase , jnp.ndarray ): _lowerCamelCase : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0: _lowerCamelCase : Tuple = timesteps.astype(dtype=jnp.floataa ) _lowerCamelCase : Dict = jnp.expand_dims(lowercase , 0 ) _lowerCamelCase : int = self.time_proj(lowercase ) _lowerCamelCase : Optional[Any] = self.time_embedding(lowercase ) # 2. pre-process _lowerCamelCase : Union[str, Any] = jnp.transpose(lowercase , (0, 2, 3, 1) ) _lowerCamelCase : Union[str, Any] = self.conv_in(lowercase ) # 3. down _lowerCamelCase : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(lowercase , lowercase ): _lowerCamelCase, _lowerCamelCase : int = down_block(lowercase , lowercase , lowercase , deterministic=not train ) else: _lowerCamelCase, _lowerCamelCase : List[str] = down_block(lowercase , lowercase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _lowerCamelCase : List[Any] = () for down_block_res_sample, down_block_additional_residual in zip( lowercase , lowercase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _lowerCamelCase : List[Any] = new_down_block_res_samples # 4. mid _lowerCamelCase : Tuple = self.mid_block(lowercase , lowercase , lowercase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. 
up for up_block in self.up_blocks: _lowerCamelCase : Dict = down_block_res_samples[-(self.layers_per_block + 1) :] _lowerCamelCase : List[Any] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowercase , lowercase ): _lowerCamelCase : str = up_block( lowercase , temb=lowercase , encoder_hidden_states=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train , ) else: _lowerCamelCase : int = up_block(lowercase , temb=lowercase , res_hidden_states_tuple=lowercase , deterministic=not train ) # 6. post-process _lowerCamelCase : Optional[int] = self.conv_norm_out(lowercase ) _lowerCamelCase : Any = nn.silu(lowercase ) _lowerCamelCase : Optional[int] = self.conv_out(lowercase ) _lowerCamelCase : Any = jnp.transpose(lowercase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowercase )
96
"""Stooge sort: a deliberately inefficient recursive comparison sort.

Bug fixed: the collapsed original defined BOTH functions as `_snake_case`
(the second definition shadowing the first) while internally calling
`stooge` and `stooge_sort`, which were never bound. Names are restored so
every reference resolves.
"""


def stooge_sort(arr: list) -> list:
    """Sort *arr* in place with stooge sort and return the same list.

    >>> stooge_sort([2, 4, 5, 3, 1])
    [1, 2, 3, 4, 5]
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """Recursively stooge-sort the inclusive slice ``arr[i:h + 1]`` in place."""
    if i >= h:
        return
    # Make sure the first element of the range is not larger than the last.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # With more than two elements, sort overlapping two-thirds chunks.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)  # first 2/3
        stooge(arr, i + t, h)  # last 2/3
        stooge(arr, i, h - t)  # first 2/3 again


# Backward-compatible alias for the obfuscated public name.
_snake_case = stooge_sort

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
96
1
# NOTE(review): collapsed/obfuscated copy of a timm->HuggingFace ResNet
# checkpoint conversion script. Statements are split across physical lines,
# so the code is kept byte-for-byte and only documented here.
#
# Structure, as visible in the code:
#   - imports: argparse/json/dataclasses/pathlib plus timm, torch,
#     huggingface_hub and transformers; a module logger is created.
#   - first @dataclass ("Tracker"-style): registers a forward hook on every
#     leaf submodule of a model, runs one forward pass, and exposes the list
#     of traced modules that carry learnable params (state_dict non-empty).
#   - second @dataclass ("ModuleTransfer"-style): traces src and dest models
#     on the same input, filters skipped types, raises if the operation
#     counts differ, then copies state_dicts pairwise from src to dest
#     (optionally printing each transfer when verbose == 1).
#   - convert_weight_and_push (first _snake_case): builds the timm model and
#     the HF ResNetForImageClassification, transfers weights, asserts logits
#     match on a random (1, 3, 224, 224) input, and optionally pushes model
#     + image processor to the hub.
#   - convert_weights_and_push (second _snake_case): loads the
#     imagenet-1k id2label mapping from the hub, builds per-architecture
#     ResNet configs (resnet18..resnet152), and converts either one named
#     model or all of them; returns (config, expected_shape).
#   - __main__ section: argparse with --model_name, --pytorch_dump_folder_path
#     (required), --push_to_hub; creates the output dir and runs conversion.
# NOTE(review): helper names referenced inside (Tracker, ModuleTransfer,
# ImageNetPreTrainedConfig, nn.Convad/BatchNormad) are mangled or defined
# via obfuscated bindings — confirm against the upstream script before reuse.
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger() @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = field(default_factory=lowercase ) lowerCamelCase__ = field(default_factory=lowercase ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Any = len(list(m.modules() ) ) == 1 or isinstance(lowercase , nn.Convad ) or isinstance(lowercase , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowercase ) def __call__( self , lowercase ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowercase ) [x.remove() for x in self.handles] return self @property def A_ ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 0 lowerCamelCase__ = field(default_factory=lowercase ) lowerCamelCase__ = field(default_factory=lowercase ) def __call__( self , lowercase ): _lowerCamelCase : Union[str, Any] = Tracker(self.dest )(lowercase ).parametrized _lowerCamelCase : Optional[Any] = Tracker(self.src )(lowercase ).parametrized _lowerCamelCase : Tuple = list(filter(lambda lowercase : type(lowercase ) not in self.src_skip , lowercase ) ) _lowerCamelCase : Optional[Any] = list(filter(lambda lowercase : type(lowercase ) not in self.dest_skip , lowercase ) ) if len(lowercase ) != len(lowercase ): 
raise Exception( F'''Numbers of operations are different. Source module has {len(lowercase )} operations while''' F''' destination module has {len(lowercase )}.''' ) for dest_m, src_m in zip(lowercase , lowercase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = True ): print(f'''Converting {name}...''' ) with torch.no_grad(): _lowerCamelCase : List[str] = timm.create_model(lowercase__ , pretrained=lowercase__ ).eval() _lowerCamelCase : List[Any] = ResNetForImageClassification(lowercase__ ).eval() _lowerCamelCase : Union[str, Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ ) _lowerCamelCase : int = torch.randn((1, 3, 224, 224) ) module_transfer(lowercase__ ) assert torch.allclose(from_model(lowercase__ ) , our_model(lowercase__ ).logits ), "The model logits don't match the original one." _lowerCamelCase : Optional[int] = f'''resnet{'-'.join(name.split('resnet' ) )}''' print(lowercase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=lowercase__ , ) # we can use the convnext one _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=lowercase__ , ) print(f'''Pushed {checkpoint_name}''' ) def _snake_case ( lowercase__ , lowercase__ = None , lowercase__ = True ): _lowerCamelCase : Dict = 'imagenet-1k-id2label.json' _lowerCamelCase : List[Any] = 1000 _lowerCamelCase : List[str] = (1, num_labels) _lowerCamelCase : int = 'huggingface/label-files' _lowerCamelCase : int = num_labels _lowerCamelCase : int = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase : Optional[Any] = {int(lowercase__ ): v for 
k, v in idalabel.items()} _lowerCamelCase : Union[str, Any] = idalabel _lowerCamelCase : Any = {v: k for k, v in idalabel.items()} _lowerCamelCase : str = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ ) _lowerCamelCase : Optional[Any] = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(lowercase__ , names_to_config[model_name] , lowercase__ , lowercase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return config, expected_shape if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowercase__ = parser.parse_args() lowercase__ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
96
# NOTE(review): collapsed/obfuscated copy of an InstructBLIP-style processor
# (transformers ProcessorMixin subclass). Statements are split across
# physical lines, so the code is kept byte-for-byte and only documented here.
#
# Structure, as visible in the code:
#   - attributes: wraps a "BlipImageProcessor" and an "AutoTokenizer", plus a
#     second Q-Former tokenizer passed to __init__.
#   - __call__: requires at least one of images/text; tokenizes `text` with
#     both the main tokenizer and the Q-Former tokenizer (the latter's
#     input_ids / attention_mask are popped and stored under qformer_* keys —
#     visible in the pop calls), processes `images` with the image processor,
#     and merges everything into one BatchFeature.
#   - batch_decode / decode: thin delegations to the main tokenizer.
#   - model_input_names property: union of tokenizer and image-processor
#     input names with duplicates removed (dict.fromkeys preserves order).
#   - save_pretrained: rejects a file path, saves the Q-Former tokenizer into
#     a "qformer_tokenizer" subfolder, then defers to the base class.
#   - from_pretrained-style classmethod: reloads the Q-Former tokenizer from
#     that subfolder and rebuilds the processor.
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = """AutoTokenizer""" def __init__( self , lowercase , lowercase , lowercase ): super().__init__(lowercase , lowercase ) # add QFormer tokenizer _lowerCamelCase : int = qformer_tokenizer def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify at least images or text.' 
) _lowerCamelCase : int = BatchFeature() if text is not None: _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) encoding.update(lowercase ) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' ) _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' ) if images is not None: _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase ) encoding.update(lowercase ) return encoding def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A_ ( self , lowercase , **lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path 
({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowercase ) return super().save_pretrained(lowercase , **lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' ) _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase ) args.append(lowercase ) return cls(*lowercase )
96
1
# NOTE(review): collapsed package __init__ for a DeepFloyd-IF-style pipeline.
# It defines one BaseOutput dataclass with three fields (names are mangled to
# "lowerCamelCase__ = 42"), then conditionally re-exports the IF pipelines,
# safety checker and watermarker only when both transformers and torch are
# available, falling back to dummy objects otherwise. Kept byte-for-byte.
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
96
# NOTE(review): collapsed/obfuscated copy of a TPU-only transformers example
# test. Statements are split across physical lines, so the code is kept
# byte-for-byte and only documented here.
#
# Structure, as visible in the code:
#   - get_results (first _snake_case): reads <output_dir>/all_results.json and
#     returns it as a dict, raising ValueError if the file is missing.
#   - a stdout StreamHandler is attached to the module logger.
#   - the @require_torch_tpu test class:
#       * first test: launches run_glue.py through xla_spawn with 8 cores and
#         a tiny MRPC fixture, asserts eval_accuracy >= 0.75 and that the run
#         takes under 500 seconds.
#       * second test: re-launches the test file itself through xla_spawn
#         (a smoke test that the spawner works on the test harness).
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowercase__ = logging.getLogger() def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = {} _lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' ) if os.path.exists(lowercase__ ): with open(lowercase__ , 'r' ) as f: _lowerCamelCase : List[Any] = json.load(lowercase__ ) else: raise ValueError(f'''can\'t find {path}''' ) return results lowercase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): import xla_spawn _lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : List[Any] = F''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(lowercase , 'argv' , lowercase ): _lowerCamelCase : Dict = time() xla_spawn.main() _lowerCamelCase : Any = time() _lowerCamelCase : Optional[int] = get_results(lowercase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def A_ ( self ): import xla_spawn _lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(lowercase , 'argv' , lowercase ): xla_spawn.main()
96
1
"""Maclaurin-series approximations of sin and cos.

Bug fixed: the collapsed original defined both functions as `_snake_case`
(the second shadowing the first) while the __main__ block called
`maclaurin_sin` / `maclaurin_cos`, which were never bound. Proper names
are restored so every reference resolves.
"""

from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using *accuracy* terms of its Maclaurin series.

    Raises:
        ValueError: if ``theta`` is not numeric or ``accuracy`` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta modulo 2*pi so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using *accuracy* terms of its Maclaurin series.

    Raises:
        ValueError: if ``theta`` is not numeric or ``accuracy`` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Same angle reduction as maclaurin_sin.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
96
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _snake_case ( lowercase__ , lowercase__ ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) ) def _snake_case ( lowercase__ , lowercase__ ): if dataset.ndim != value_array.ndim: _lowerCamelCase : Tuple = ( 'Wrong input data\'s dimensions... ' f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(lowercase__ ) try: if dataset.shape[1] != value_array.shape[1]: _lowerCamelCase : Optional[int] = ( 'Wrong input data\'s shape... ' f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(lowercase__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: _lowerCamelCase : int = ( 'Input data have different datatype... ' f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(lowercase__ ) _lowerCamelCase : Optional[int] = [] for value in value_array: _lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] ) _lowerCamelCase : Union[str, Any] = dataset[0].tolist() for dataset_value in dataset[1:]: _lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ ) if dist > temp_dist: _lowerCamelCase : List[Any] = temp_dist _lowerCamelCase : List[str] = dataset_value.tolist() answer.append([vector, dist] ) return answer def _snake_case ( lowercase__ , lowercase__ ): return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ )) if __name__ == "__main__": import doctest doctest.testmod()
96
1
# NOTE(review): collapsed/obfuscated copy of a Boruvka minimum-spanning-tree
# implementation (an undirected weighted Graph class with a nested
# UnionFind). A `def` keyword is even split across physical lines
# (L411 ends with "def", L412 starts "__len__"), so the code is kept
# byte-for-byte and only documented here.
#
# Structure, as visible in the code:
#   - Graph: adjacency stored as a dict-of-dicts with symmetric weights
#     (add_edge writes both directions); add_vertex initialises an empty
#     adjacency dict and counts vertices.
#   - the "distinct weight" method: sorts edges by weight and bumps equal
#     weights so all edge weights become distinct (Boruvka requires unique
#     weights for deterministic cheapest-edge selection), then writes the
#     adjusted weights back symmetrically.
#   - __str__ / get_edges / get_vertices: string dump and edge/vertex views.
#   - static build(vertices, edges): convenience constructor.
#   - UnionFind: parent/rank dicts with path compression in find() and
#     union by rank.
#   - static boruvka-style method: repeatedly finds each component's
#     cheapest outgoing edge, unions the endpoints, collects MST edges,
#     halves the component count, and finally rebuilds a Graph from the
#     collected MST edges.
"""simple docstring""" class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : Tuple = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : int = {} def A_ ( self , lowercase ): if vertex not in self.adjacency: _lowerCamelCase : Optional[Any] = {} self.num_vertices += 1 def A_ ( self , lowercase , lowercase , lowercase ): self.add_vertex(lowercase ) self.add_vertex(lowercase ) if head == tail: return _lowerCamelCase : Optional[int] = weight _lowerCamelCase : Optional[int] = weight def A_ ( self ): _lowerCamelCase : Optional[Any] = self.get_edges() for edge in edges: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = edge edges.remove((tail, head, weight) ) for i in range(len(lowercase ) ): _lowerCamelCase : Any = list(edges[i] ) edges.sort(key=lambda lowercase : e[2] ) for i in range(len(lowercase ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _lowerCamelCase : Dict = edges[i][2] + 1 for edge in edges: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = edge _lowerCamelCase : int = weight _lowerCamelCase : Dict = weight def __str__( self ): _lowerCamelCase : Tuple = '' for tail in self.adjacency: for head in self.adjacency[tail]: _lowerCamelCase : Optional[Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip('\n' ) def A_ ( self ): _lowerCamelCase : List[str] = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def A_ ( self ): return self.adjacency.keys() @staticmethod def A_ ( lowercase=None , lowercase=None ): _lowerCamelCase : List[str] = Graph() if vertices is None: _lowerCamelCase : Optional[Any] = [] if edges is None: _lowerCamelCase : Dict = [] for vertex in vertices: g.add_vertex(lowercase ) for edge in edges: g.add_edge(*lowercase ) return g class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : Dict = {} _lowerCamelCase : List[str] = {} def 
__len__( self ): return len(self.parent ) def A_ ( self , lowercase ): if item in self.parent: return self.find(lowercase ) _lowerCamelCase : str = item _lowerCamelCase : int = 0 return item def A_ ( self , lowercase ): if item not in self.parent: return self.make_set(lowercase ) if item != self.parent[item]: _lowerCamelCase : Tuple = self.find(self.parent[item] ) return self.parent[item] def A_ ( self , lowercase , lowercase ): _lowerCamelCase : int = self.find(lowercase ) _lowerCamelCase : Union[str, Any] = self.find(lowercase ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _lowerCamelCase : int = roota return roota if self.rank[roota] < self.rank[roota]: _lowerCamelCase : Any = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _lowerCamelCase : Optional[int] = roota return roota return None @staticmethod def A_ ( lowercase ): _lowerCamelCase : str = graph.num_vertices _lowerCamelCase : str = Graph.UnionFind() _lowerCamelCase : Dict = [] while num_components > 1: _lowerCamelCase : Dict = {} for vertex in graph.get_vertices(): _lowerCamelCase : Optional[Any] = -1 _lowerCamelCase : List[str] = graph.get_edges() for edge in edges: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = edge edges.remove((tail, head, weight) ) for edge in edges: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = edge _lowerCamelCase : Optional[int] = union_find.find(lowercase ) _lowerCamelCase : str = union_find.find(lowercase ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _lowerCamelCase : int = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _lowerCamelCase : Any = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = cheap_edge[vertex] if union_find.find(lowercase ) != union_find.find(lowercase ): union_find.union(lowercase , lowercase ) 
mst_edges.append(cheap_edge[vertex] ) _lowerCamelCase : Dict = num_components - 1 _lowerCamelCase : int = Graph.build(edges=lowercase ) return mst
96
"""Simple TCP client that receives a file from a server on this host.

Bug fixed: the collapsed original defined the function as `_snake_case` but
invoked `main()` in the __main__ guard, which was never bound. The function
is now named `main` (with a backward-compatible alias).
"""

import socket


def main() -> None:
    """Connect to a local file server and save the received bytes.

    Connects to port 12312 on this host, sends a greeting, then streams the
    server's response into ``Received_file`` in 1 KiB chunks until the
    server closes the connection.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    # `with` guarantees the output file is closed even if recv fails.
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:  # empty bytes => server closed the connection
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")


# Backward-compatible alias for the obfuscated public name.
_snake_case = main

if __name__ == "__main__":
    main()
96
1
"""Compute the equated monthly installment (EMI) on a loan.

Bugs fixed: the collapsed original declared three parameters all named
`lowercase__` (a SyntaxError — duplicate argument names), and its
validation accepted ``rate_per_annum == 0`` even though the EMI formula
then divides by zero. Parameters are restored to meaningful names and the
zero-interest case returns an even split of the principal.
"""


def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Return the fixed monthly payment for a loan.

    Uses p * r * (1 + r)**n / ((1 + r)**n - 1), where r is the monthly
    rate and n the total number of monthly payments.

    Raises:
        Exception: if principal <= 0, rate_per_annum < 0, or
            years_to_repay is not a positive int.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get the monthly rate.
    rate_per_month = rate_per_annum / 12
    # Payments are monthly, so multiply the years by 12.
    number_of_payments = years_to_repay * 12
    if rate_per_month == 0:
        # Interest-free loan: the formula would divide by zero, so just
        # split the principal evenly over the payments.
        return principal / number_of_payments
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


# Backward-compatible alias for the obfuscated public name.
_snake_case = equated_monthly_installments

if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record lowercase__ = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ lowercase__ = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ lowercase__ = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, 
references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def _snake_case ( lowercase__ , lowercase__ ): return float((preds == labels).mean() ) def _snake_case ( lowercase__ , lowercase__ , lowercase__="binary" ): _lowerCamelCase : str = simple_accuracy(lowercase__ , lowercase__ ) _lowerCamelCase : Any = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ , average=lowercase__ ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Any = {} for id_pred, label in zip(lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' _lowerCamelCase : Union[str, Any] = id_pred['prediction'] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _lowerCamelCase : Optional[Any] = [(pred, label)] _lowerCamelCase, _lowerCamelCase : Optional[int] = [], [] for question, preds_labels in question_map.items(): _lowerCamelCase, _lowerCamelCase : Tuple = zip(*lowercase__ ) _lowerCamelCase : List[str] = fa_score(y_true=lowercase__ , y_pred=lowercase__ , average='macro' ) fas.append(lowercase__ ) _lowerCamelCase : int = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase__ ) ) ems.append(lowercase__ ) _lowerCamelCase : Optional[Any] = float(sum(lowercase__ ) / len(lowercase__ ) ) _lowerCamelCase : Optional[int] = sum(lowercase__ ) / len(lowercase__ ) _lowerCamelCase : List[Any] = float(fa_score(y_true=lowercase__ , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( 
datasets.Metric ): '''simple docstring''' def A_ ( self ): if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , ) def A_ ( self ): if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "prediction_text": datasets.Value('string' ), }, "references": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "answers": datasets.Sequence(datasets.Value('string' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('int64' ), "paragraph": datasets.Value('int64' ), "question": datasets.Value('int64' ), }, "prediction": datasets.Value('int64' ), }, "references": datasets.Value('int64' ), } else: return { "predictions": datasets.Value('int64' ), "references": datasets.Value('int64' ), } def A_ ( self , lowercase , lowercase ): if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )} elif self.config_name == "cb": return acc_and_fa(lowercase , lowercase , fa_avg='macro' ) elif self.config_name == "record": _lowerCamelCase : List[str] = [ { 'qas': [ {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]} for ref in references ] } ] _lowerCamelCase : Union[str, Any] = {pred['idx']['query']: pred['prediction_text'] for pred in predictions} return evaluate_record(lowercase , lowercase )[0] elif 
self.config_name == "multirc": return evaluate_multirc(lowercase , lowercase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(lowercase , lowercase )} else: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
96
1
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : Optional[int] = {} def A_ ( self , lowercase ): _lowerCamelCase : Optional[Any] = {} def A_ ( self , lowercase , lowercase , lowercase ): if nodea not in self.connections: self.add_node(lowercase ) if nodea not in self.connections: self.add_node(lowercase ) _lowerCamelCase : Union[str, Any] = probability def A_ ( self ): return list(self.connections ) def A_ ( self , lowercase ): _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Any = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Optional[Any] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(lowercase__ , lowercase__ , lowercase__ ) _lowerCamelCase : List[str] = Counter(graph.get_nodes() ) _lowerCamelCase : Union[str, Any] = start for _ in range(lowercase__ ): _lowerCamelCase : List[Any] = graph.transition(lowercase__ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
96
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DDIMPipeline lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS lowerCamelCase__ = False def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : List[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) _lowerCamelCase : List[str] = DDIMScheduler() _lowerCamelCase : Optional[int] = {'unet': unet, 'scheduler': scheduler} return components def A_ ( self , lowercase , lowercase=0 ): if str(lowercase ).startswith('mps' ): _lowerCamelCase : Dict = torch.manual_seed(lowercase ) else: _lowerCamelCase : List[str] = torch.Generator(device=lowercase ).manual_seed(lowercase ) _lowerCamelCase : Tuple = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def A_ ( self ): _lowerCamelCase : Any = 'cpu' _lowerCamelCase : Tuple = self.get_dummy_components() _lowerCamelCase : Optional[Any] = self.pipeline_class(**lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : str = self.get_dummy_inputs(lowercase ) _lowerCamelCase : int = pipe(**lowercase ).images _lowerCamelCase : 
Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) _lowerCamelCase : Tuple = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) _lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase , 1E-3 ) def A_ ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def A_ ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def A_ ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Optional[Any] = 'google/ddpm-cifar10-32' _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(lowercase ) _lowerCamelCase : Dict = DDIMScheduler() _lowerCamelCase : Dict = DDIMPipeline(unet=lowercase , scheduler=lowercase ) ddim.to(lowercase ) ddim.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : List[str] = torch.manual_seed(0 ) _lowerCamelCase : str = ddim(generator=lowercase , eta=0.0 , output_type='numpy' ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCamelCase : List[Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : Optional[int] = 'google/ddpm-ema-bedroom-256' _lowerCamelCase : str = UNetaDModel.from_pretrained(lowercase ) _lowerCamelCase : str = DDIMScheduler.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = DDIMPipeline(unet=lowercase , scheduler=lowercase ) ddpm.to(lowercase ) ddpm.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = torch.manual_seed(0 ) _lowerCamelCase : int = ddpm(generator=lowercase , 
output_type='numpy' ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCamelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
96
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): _lowerCamelCase : List[str] = parent _lowerCamelCase : Dict = batch_size _lowerCamelCase : Optional[Any] = image_size _lowerCamelCase : int = num_channels _lowerCamelCase : int = embeddings_size _lowerCamelCase : Optional[Any] = hidden_sizes _lowerCamelCase : Tuple = depths _lowerCamelCase : Optional[Any] = is_training _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Tuple = num_labels _lowerCamelCase : List[str] = scope _lowerCamelCase : Union[str, Any] = len(lowercase ) def A_ ( self ): _lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Dict = None if self.use_labels: _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def A_ ( self ): return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = TFRegNetModel(config=lowercase ) _lowerCamelCase : Any = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : str = self.num_labels _lowerCamelCase : int = TFRegNetForImageClassification(lowercase ) _lowerCamelCase : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): _lowerCamelCase : Any = self.prepare_config_and_inputs() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = config_and_inputs _lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : Union[str, Any] = TFRegNetModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase ) def A_ ( self ): return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def A_ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF 
does not support backprop for grouped convolutions on CPU.' , ) @slow def A_ ( self ): super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def A_ ( self ): pass def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(lowercase ) _lowerCamelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def A_ ( self ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A_ ( self ): def check_hidden_states_output(lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = model_class(lowercase ) _lowerCamelCase : List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) _lowerCamelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase : str = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Any = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCamelCase : Tuple = layer_type _lowerCamelCase : int = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] _lowerCamelCase : str = True check_hidden_states_output(lowercase , lowercase , lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): _lowerCamelCase : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ) _lowerCamelCase : int = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(lowercase ) _lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase ) _lowerCamelCase : List[Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) _lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) _lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) _lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) _lowerCamelCase : Optional[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) _lowerCamelCase : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) 
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def A_ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def _snake_case ( ): _lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCamelCase : Optional[int] = self.default_image_processor _lowerCamelCase : Optional[int] = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(images=lowercase , return_tensors='tf' ) # forward pass _lowerCamelCase : Dict = model(**lowercase , training=lowercase ) # verify the logits _lowerCamelCase : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase ) _lowerCamelCase : str = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
96
"""simple docstring""" # Imports import numpy as np class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ): self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase ) def A_ ( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ): if red is not None: _lowerCamelCase : Optional[int] = red if green is not None: _lowerCamelCase : Optional[Any] = green if blue is not None: _lowerCamelCase : Tuple = blue if red_edge is not None: _lowerCamelCase : Optional[Any] = red_edge if nir is not None: _lowerCamelCase : Union[str, Any] = nir return True def A_ ( self , lowercase="" , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ): self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase ) _lowerCamelCase : str = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!' 
) return False def A_ ( self ): return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def A_ ( self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def A_ ( self ): return self.nir * (self.red / (self.green**2)) def A_ ( self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def A_ ( self ): return (self.nir - self.red) / (self.nir + self.red) def A_ ( self ): return (self.nir - self.blue) / (self.nir + self.blue) def A_ ( self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def A_ ( self ): return (self.nir - self.green) / (self.nir + self.green) def A_ ( self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def A_ ( self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def A_ ( self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def A_ ( self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def A_ ( self , lowercase=0.08 , lowercase=1.22 , lowercase=0.03 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def A_ ( self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def A_ ( self ): return (self.nir / self.green) - 1 def A_ ( self ): return (self.nir / self.redEdge) - 1 def A_ ( self ): return (self.red - self.blue) / self.red def A_ ( self ): _lowerCamelCase : Any = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def A_ ( self ): return self.nir - self.green def A_ ( self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def A_ ( self ): _lowerCamelCase : Any = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - 
(self.red - 0.1_25) / (1 - self.red) def A_ ( self , lowercase=0.16 ): return (self.nir - self.green) / (self.nir + self.green + y) def A_ ( self , lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def A_ ( self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def A_ ( self , lowercase=None , lowercase=None ): return (self.nir - b) / (a * self.red) def A_ ( self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def A_ ( self ): return (self.red + self.green + self.blue) / 30.5 def A_ ( self ): return self.nir / self.red def A_ ( self ): return (self.rvi() - 1) / (self.rvi() + 1) def A_ ( self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def A_ ( self ): return self.green / (self.nir + self.red + self.green) def A_ ( self ): return self.nir / (self.nir + self.red + self.green) def A_ ( self ): return self.red / (self.nir + self.red + self.green) def A_ ( self ): return (self.green - self.red) / (self.green + self.red) def A_ ( self ): return (self.red - self.green) / (self.red + self.green) def A_ ( self ): _lowerCamelCase : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) _lowerCamelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def A_ ( self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def A_ ( self ): return self.nir / self.red def A_ ( self ): return (self.ndvi() + 0.5) ** (1 / 2) def A_ ( self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
96
1
"""simple docstring""" import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=13 , lowercase=3 , lowercase=True , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=224 , lowercase=1000 , lowercase=[3, 3, 6, 4] , lowercase=[48, 56, 112, 220] , ): _lowerCamelCase : int = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : str = is_training _lowerCamelCase : Any = use_labels _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Dict = num_labels _lowerCamelCase : Any = image_size _lowerCamelCase : str = layer_depths _lowerCamelCase : Optional[Any] = embed_dims def A_ ( self ): _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : str = None if self.use_labels: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCamelCase : int = self.get_config() return config, pixel_values, labels def A_ ( self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , 
mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase , layer_scale_init_value=1E-5 , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : str = SwiftFormerModel(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Any = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = self.num_labels _lowerCamelCase : Any = SwiftFormerForImageClassification(lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[int] = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) _lowerCamelCase : List[Any] = SwiftFormerForImageClassification(lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Tuple = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): ((_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase)) : Optional[Any] = self.prepare_config_and_inputs() _lowerCamelCase : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCamelCase__ = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : 
List[str] = SwiftFormerModelTester(self ) _lowerCamelCase : Optional[int] = ConfigTester( self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def A_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def A_ ( self ): pass def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(lowercase ) _lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Tuple = model_class(lowercase ) _lowerCamelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : int = [*signature.parameters.keys()] _lowerCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def A_ ( self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : int = SwiftFormerModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def A_ ( self ): pass def A_ ( self ): def check_hidden_states_output(lowercase , lowercase , lowercase ): _lowerCamelCase : int = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): _lowerCamelCase 
: List[str] = model(**self._prepare_for_class(lowercase , lowercase ) ) _lowerCamelCase : Dict = outputs.hidden_states _lowerCamelCase : Optional[int] = 8 self.assertEqual(len(lowercase ) , lowercase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowercase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Any = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : Dict = True check_hidden_states_output(lowercase , lowercase , lowercase ) def A_ ( self ): def _config_zero_init(lowercase ): _lowerCamelCase : List[str] = copy.deepcopy(lowercase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowercase , lowercase , 1E-10 ) if isinstance(getattr(lowercase , lowercase , lowercase ) , lowercase ): _lowerCamelCase : int = _config_zero_init(getattr(lowercase , lowercase ) ) setattr(lowercase , lowercase , lowercase ) return configs_no_init _lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Optional[int] = _config_zero_init(lowercase ) for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(config=lowercase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 
1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A_ ( self ): pass def _snake_case ( ): _lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A_ ( self ): return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def A_ ( self ): _lowerCamelCase : Dict = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(lowercase ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Optional[int] = prepare_img() _lowerCamelCase : Optional[int] = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase ) # forward pass with torch.no_grad(): _lowerCamelCase : Any = model(**lowercase ) # verify the logits _lowerCamelCase : Any = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase ) _lowerCamelCase : str = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
96
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase , lowercase=768 ): super().__init__(lowercase ) _lowerCamelCase : Any = proj_size _lowerCamelCase : Dict = CLIPVisionModel(lowercase ) _lowerCamelCase : List[str] = PaintByExampleMapper(lowercase ) _lowerCamelCase : Optional[Any] = nn.LayerNorm(config.hidden_size ) _lowerCamelCase : int = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling _lowerCamelCase : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def A_ ( self , lowercase , lowercase=False ): _lowerCamelCase : Union[str, Any] = self.model(pixel_values=lowercase ) _lowerCamelCase : int = clip_output.pooler_output _lowerCamelCase : str = self.mapper(latent_states[:, None] ) _lowerCamelCase : List[Any] = self.final_layer_norm(lowercase ) _lowerCamelCase : Dict = self.proj_out(lowercase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self , lowercase ): super().__init__() _lowerCamelCase : Tuple = (config.num_hidden_layers + 1) // 5 _lowerCamelCase : int = config.hidden_size _lowerCamelCase : Optional[Any] = 1 _lowerCamelCase : str = nn.ModuleList( [ BasicTransformerBlock(lowercase , lowercase , lowercase , activation_fn='gelu' , attention_bias=lowercase ) for _ in range(lowercase ) ] ) def A_ ( self , lowercase ): for block in self.blocks: _lowerCamelCase : Tuple = block(lowercase ) return hidden_states
96
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : list[list[Edge]] = [[] for _ in range(lowercase )] _lowerCamelCase : List[Any] = size def __getitem__( self , lowercase ): return iter(self._graph[vertex] ) @property def A_ ( self ): return self._size def A_ ( self , lowercase , lowercase , lowercase ): if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowercase , lowercase ) ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Dict = deque([start_vertex] ) _lowerCamelCase : list[int | None] = [None] * self.size _lowerCamelCase : str = 0 while queue: _lowerCamelCase : Tuple = queue.popleft() _lowerCamelCase : Any = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _lowerCamelCase : Dict = current_distance + edge.weight _lowerCamelCase : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(lowercase , lowercase ) and new_distance >= dest_vertex_distance ): continue _lowerCamelCase : Union[str, Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
96
"""simple docstring""" lowercase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowercase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = from_type.lower().strip('s' ) _lowerCamelCase : List[Any] = to_type.lower().strip('s' ) _lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) _lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) if from_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Tuple = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) if to_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Any = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) _lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized] _lowerCamelCase : int = METRIC_CONVERSION[to_sanitized] _lowerCamelCase : List[str] = 1 if from_exponent > to_exponent: _lowerCamelCase : List[str] = from_exponent - to_exponent else: _lowerCamelCase : List[Any] = -(to_exponent - from_exponent) return value * pow(10 , lowercase__ ) if __name__ == "__main__": from doctest import testmod testmod()
96
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) 
try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) _lowerCamelCase : List[Any] = (boundary[1] - boundary[0]) / steps _lowerCamelCase : Tuple = boundary[0] _lowerCamelCase : Dict = boundary[1] _lowerCamelCase : List[Any] = make_points(lowercase__ , lowercase__ , lowercase__ ) _lowerCamelCase : List[Any] = 0.0 y += (h / 2.0) * f(lowercase__ ) for i in x_i: # print(i) y += h * f(lowercase__ ) y += (h / 2.0) * f(lowercase__ ) return y def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : str = a + h while x < (b - h): yield x _lowerCamelCase : int = x + h def _snake_case ( lowercase__ ): # enter your function here _lowerCamelCase : Optional[Any] = (x - 0) * (x - 0) return y def _snake_case ( ): _lowerCamelCase : int = 0.0 # Lower bound of integration _lowerCamelCase : Optional[int] = 1.0 # Upper bound of integration _lowerCamelCase : List[str] = 1_0.0 # define number of steps or resolution _lowerCamelCase : List[Any] = [a, b] # define boundary of integration _lowerCamelCase : Optional[Any] = method_a(lowercase__ , lowercase__ ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
96
1
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase__ = """src/transformers""" lowercase__ = """docs/source/en/tasks""" def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCamelCase : Tuple = f.readlines() # Find the start prompt. _lowerCamelCase : Tuple = 0 while not lines[start_index].startswith(lowercase__ ): start_index += 1 start_index += 1 _lowerCamelCase : List[Any] = start_index while not lines[end_index].startswith(lowercase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. 
lowercase__ = direct_transformers_import(TRANSFORMERS_PATH) lowercase__ = { """asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, """audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, """language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, """image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, """masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, """multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, """object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, """question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, """semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, """sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, """summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, """translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, """document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, """monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains 
model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). lowercase__ = { """summarization.md""": ("""nllb""",), """translation.md""": ("""nllb""",), } def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = TASK_GUIDE_TO_MODELS[task_guide] _lowerCamelCase : Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase__ , set() ) _lowerCamelCase : Optional[Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n" def _snake_case ( lowercase__ , lowercase__=False ): _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = _find_text_in_file( filename=os.path.join(lowercase__ , lowercase__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) _lowerCamelCase : Any = get_model_list_for_task(lowercase__ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase__ , lowercase__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' ' to fix this.' ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase__ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
96
"""simple docstring""" import math def _snake_case ( lowercase__ ): return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == num def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : List[Any] = n while left <= right: _lowerCamelCase : str = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: _lowerCamelCase : str = mid - 1 else: _lowerCamelCase : Optional[int] = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split lowercase__ = datasets.load_iris() lowercase__ = np.array(data["""data"""]) lowercase__ = np.array(data["""target"""]) lowercase__ = data["""target_names"""] lowercase__ , lowercase__ , lowercase__ , lowercase__ = train_test_split(X, y) def _snake_case ( lowercase__ , lowercase__ ): return np.linalg.norm(np.array(lowercase__ ) - np.array(lowercase__ ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ): _lowerCamelCase : Optional[int] = zip(lowercase__ , lowercase__ ) # List of distances of all points from the point to be classified _lowerCamelCase : Optional[int] = [] for data_point in data: _lowerCamelCase : Optional[int] = euclidean_distance(data_point[0] , lowercase__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. _lowerCamelCase : List[str] = [i[1] for i in sorted(lowercase__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified _lowerCamelCase : Union[str, Any] = Counter(lowercase__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
96
"""simple docstring""" import functools from typing import Any def _snake_case ( lowercase__ , lowercase__ ): # Validation if not isinstance(lowercase__ , lowercase__ ) or len(lowercase__ ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(lowercase__ , lowercase__ ) or not all( isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie _lowerCamelCase : dict[str, Any] = {} _lowerCamelCase : List[Any] = 'WORD_KEEPER' for word in words: _lowerCamelCase : Dict = trie for c in word: if c not in trie_node: _lowerCamelCase : Any = {} _lowerCamelCase : str = trie_node[c] _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Dict = len(lowercase__ ) # Dynamic programming method @functools.cache def is_breakable(lowercase__ ) -> bool: if index == len_string: return True _lowerCamelCase : List[Any] = trie for i in range(lowercase__ , lowercase__ ): _lowerCamelCase : Any = trie_node.get(string[i] , lowercase__ ) if trie_node is None: return False if trie_node.get(lowercase__ , lowercase__ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Dict = sorted(numsa + numsa ) _lowerCamelCase, _lowerCamelCase : Any = divmod(len(lowercase__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = [float(x) for x in input("""Enter the elements of first array: """).split()] lowercase__ = [float(x) for x in input("""Enter the elements of second array: """).split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
96
"""simple docstring""" def _snake_case ( lowercase__ ): if not isinstance(lowercase__ , lowercase__ ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(lowercase__ ) == 0: raise ValueError('Input list must be a non empty list' ) if len(lowercase__ ) == 1: return True _lowerCamelCase : List[Any] = series[1] - series[0] for index in range(len(lowercase__ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def _snake_case ( lowercase__ ): if not isinstance(lowercase__ , lowercase__ ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(lowercase__ ) == 0: raise ValueError('Input list must be a non empty list' ) _lowerCamelCase : Optional[int] = 0 for val in series: answer += val return answer / len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = VideoMAEConfig() set_architecture_configs(lowercase__ , lowercase__ ) if "finetuned" not in model_name: _lowerCamelCase : Tuple = False if "finetuned" in model_name: _lowerCamelCase : Optional[Any] = 'huggingface/label-files' if "kinetics" in model_name: _lowerCamelCase : Union[str, Any] = 400 _lowerCamelCase : str = 'kinetics400-id2label.json' elif "ssv2" in model_name: _lowerCamelCase : List[Any] = 174 _lowerCamelCase : Optional[Any] = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) _lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase : List[Any] = {int(lowercase__ ): v for k, v in idalabel.items()} _lowerCamelCase : List[Any] = idalabel _lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _snake_case ( lowercase__ , lowercase__ ): if "small" in model_name: _lowerCamelCase : List[str] = 384 _lowerCamelCase : Dict = 1536 _lowerCamelCase : int = 12 _lowerCamelCase : List[Any] = 16 _lowerCamelCase : Any = 12 _lowerCamelCase : int = 3 _lowerCamelCase : Dict = 192 _lowerCamelCase : Optional[Any] = 768 elif "large" in model_name: _lowerCamelCase : Optional[Any] = 1024 _lowerCamelCase : Tuple = 4096 _lowerCamelCase : int = 24 _lowerCamelCase : Union[str, Any] = 16 _lowerCamelCase : Tuple = 12 _lowerCamelCase : List[str] = 8 _lowerCamelCase : List[Any] = 512 _lowerCamelCase : Any = 2048 elif "huge" in model_name: _lowerCamelCase : Dict = 1280 _lowerCamelCase : Any = 5120 _lowerCamelCase : str = 32 _lowerCamelCase : Union[str, Any] = 16 
_lowerCamelCase : str = 12 _lowerCamelCase : Union[str, Any] = 8 _lowerCamelCase : Tuple = 640 _lowerCamelCase : Any = 2560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def _snake_case ( lowercase__ ): if "encoder." in name: _lowerCamelCase : Any = name.replace('encoder.' , '' ) if "cls_token" in name: _lowerCamelCase : Any = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: _lowerCamelCase : Dict = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: _lowerCamelCase : Tuple = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: _lowerCamelCase : Any = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowerCamelCase : List[Any] = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: _lowerCamelCase : Optional[Any] = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: _lowerCamelCase : Optional[Any] = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: _lowerCamelCase : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: _lowerCamelCase : List[Any] = name.replace('attn' , 'attention.self' ) if "attn" in name: _lowerCamelCase : Optional[Any] = name.replace('attn' , 'attention.attention' ) if "norm1" in name: _lowerCamelCase : List[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _lowerCamelCase : str = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _lowerCamelCase : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _lowerCamelCase : Optional[int] = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: _lowerCamelCase : Optional[int] = 
name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: _lowerCamelCase : Optional[Any] = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: _lowerCamelCase : Optional[Any] = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _lowerCamelCase : int = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _lowerCamelCase : Union[str, Any] = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: _lowerCamelCase : Any = name.replace('head' , 'classifier' ) return name def _snake_case ( lowercase__ , lowercase__ ): for key in orig_state_dict.copy().keys(): _lowerCamelCase : List[str] = orig_state_dict.pop(lowercase__ ) if key.startswith('encoder.' ): _lowerCamelCase : int = key.replace('encoder.' , '' ) if "qkv" in key: _lowerCamelCase : str = key.split('.' ) if key.startswith('decoder.blocks' ): _lowerCamelCase : Any = config.decoder_hidden_size _lowerCamelCase : Optional[int] = int(key_split[2] ) _lowerCamelCase : List[str] = 'decoder.decoder_layers.' if "weight" in key: _lowerCamelCase : Optional[Any] = val[:dim, :] _lowerCamelCase : Any = val[dim : dim * 2, :] _lowerCamelCase : Union[str, Any] = val[-dim:, :] else: _lowerCamelCase : int = config.hidden_size _lowerCamelCase : Optional[int] = int(key_split[1] ) _lowerCamelCase : Tuple = 'videomae.encoder.layer.' 
if "weight" in key: _lowerCamelCase : List[str] = val[:dim, :] _lowerCamelCase : Optional[int] = val[dim : dim * 2, :] _lowerCamelCase : List[Any] = val[-dim:, :] else: _lowerCamelCase : int = val return orig_state_dict def _snake_case ( ): _lowerCamelCase : Optional[Any] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) _lowerCamelCase : Any = np.load(lowercase__ ) return list(lowercase__ ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : str = get_videomae_config(lowercase__ ) if "finetuned" in model_name: _lowerCamelCase : str = VideoMAEForVideoClassification(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = VideoMAEForPreTraining(lowercase__ ) # download original checkpoint, hosted on Google Drive _lowerCamelCase : Any = 'pytorch_model.bin' gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ ) _lowerCamelCase : Optional[int] = torch.load(lowercase__ , map_location='cpu' ) if "model" in files: _lowerCamelCase : Any = files['model'] else: _lowerCamelCase : Dict = files['module'] _lowerCamelCase : Union[str, Any] = convert_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # verify model on basic input _lowerCamelCase : Any = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _lowerCamelCase : Tuple = prepare_video() _lowerCamelCase : List[Any] = image_processor(lowercase__ , return_tensors='pt' ) if "finetuned" not in model_name: _lowerCamelCase : Tuple = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) _lowerCamelCase : List[Any] = torch.load(lowercase__ ) _lowerCamelCase : Any = model(**lowercase__ ) _lowerCamelCase : Union[str, Any] = outputs.logits _lowerCamelCase : Optional[int] = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 
epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _lowerCamelCase : Optional[Any] = torch.Size([1, 400] ) _lowerCamelCase : int = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] ) elif model_name == "videomae-small-finetuned-ssv2": _lowerCamelCase : Optional[int] = torch.Size([1, 174] ) _lowerCamelCase : int = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] ) elif model_name == "videomae-base": _lowerCamelCase : int = torch.Size([1, 1408, 1536] ) _lowerCamelCase : Dict = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] ) elif model_name == "videomae-base-short": _lowerCamelCase : Any = torch.Size([1, 1408, 1536] ) _lowerCamelCase : Optional[Any] = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ) # we verified the loss both for normalized and unnormalized targets for this one _lowerCamelCase : str = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] ) elif model_name == "videomae-large": _lowerCamelCase : Any = torch.Size([1, 1408, 1536] ) _lowerCamelCase : int = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] ) elif model_name == "videomae-large-finetuned-kinetics": _lowerCamelCase : List[str] = torch.Size([1, 400] ) _lowerCamelCase : Optional[int] = torch.tensor([0.0_7_7_1, 
0.0_0_1_1, -0.3_6_2_5] ) elif model_name == "videomae-huge-finetuned-kinetics": _lowerCamelCase : Any = torch.Size([1, 400] ) _lowerCamelCase : Any = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] ) elif model_name == "videomae-base-short-finetuned-kinetics": _lowerCamelCase : Union[str, Any] = torch.Size([1, 400] ) _lowerCamelCase : Tuple = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] ) elif model_name == "videomae-base-finetuned-kinetics": _lowerCamelCase : Dict = torch.Size([1, 400] ) _lowerCamelCase : List[str] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ) elif model_name == "videomae-base-short-ssv2": _lowerCamelCase : Any = torch.Size([1, 1408, 1536] ) _lowerCamelCase : Optional[Any] = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _lowerCamelCase : Union[str, Any] = torch.Size([1, 174] ) _lowerCamelCase : Dict = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] ) elif model_name == "videomae-base-ssv2": _lowerCamelCase : Optional[int] = torch.Size([1, 1408, 1536] ) _lowerCamelCase : str = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] ) elif model_name == "videomae-base-finetuned-ssv2": _lowerCamelCase : Tuple = torch.Size([1, 174] ) _lowerCamelCase : Dict = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] ) else: raise ValueError(f'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1E-4 ) print('Logits ok!' 
) # verify loss, if applicable if model_name == "videomae-base-short": _lowerCamelCase : Dict = outputs.loss assert torch.allclose(lowercase__ , lowercase__ , atol=1E-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase__ ) model.save_pretrained(lowercase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(lowercase__ , organization='nielsr' ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase__ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
96
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase__ = 16 lowercase__ = 32 def _snake_case ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ): _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ ) _lowerCamelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowercase__ ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCamelCase : int = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
_lowerCamelCase : List[str] = DataLoader( tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) _lowerCamelCase : int = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ , lowercase__ ): # Initialize accelerator _lowerCamelCase : Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : Optional[int] = config['lr'] _lowerCamelCase : Optional[int] = int(config['num_epochs'] ) _lowerCamelCase : Union[str, Any] = int(config['seed'] ) _lowerCamelCase : Optional[int] = int(config['batch_size'] ) _lowerCamelCase : Dict = args.model_name_or_path set_seed(lowercase__ ) _lowerCamelCase, _lowerCamelCase : Optional[int] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer _lowerCamelCase : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCamelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: _lowerCamelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[Any] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCamelCase : Tuple = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , 
num_training_steps=lowercase__ , ) else: _lowerCamelCase : Any = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over _lowerCamelCase : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCamelCase : Dict = 0 # Now we train the model _lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : str = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): _lowerCamelCase : List[Any] = model(**lowercase__ ) _lowerCamelCase : int = outputs.loss _lowerCamelCase : Dict = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _lowerCamelCase : Union[str, Any] = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**lowercase__ ) _lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCamelCase, _lowerCamelCase : List[str] = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: _lowerCamelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCamelCase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) _lowerCamelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowercase__ ) _lowerCamelCase : Tuple = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: _lowerCamelCase : str = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ): _lowerCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=lowercase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase__ , ) parser.add_argument( '--output_dir' , type=lowercase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' 
, ) parser.add_argument( '--performance_lower_bound' , type=lowercase__ , default=lowercase__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=lowercase__ , default=3 , help='Number of train epochs.' , ) _lowerCamelCase : Optional[Any] = parser.parse_args() _lowerCamelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
96
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """new-model""" if is_tf_available(): class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): 
_lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): _lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = 
TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow @require_tensorflow_probability def A_ ( self ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : List[Any] = 
TFAutoModelForTableQuestionAnswering.from_pretrained( lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = copy.deepcopy(model.config ) _lowerCamelCase : Dict = ['FunnelBaseModel'] _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): try: AutoConfig.register('new-model' , lowercase ) _lowerCamelCase : Tuple = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) auto_class.register(lowercase , lowercase ) # Trying to register something existing in the Transformers library will raise an error with 
self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config() _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() ) _lowerCamelCase : int = auto_class.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def A_ ( self ): with self.assertRaisesRegex( lowercase , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def A_ ( self ): with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def A_ ( self ): # Make sure we have cached the model. 
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
96
1
"""simple docstring""" lowercase__ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowercase__ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = from_type.lower().strip('s' ) _lowerCamelCase : List[Any] = to_type.lower().strip('s' ) _lowerCamelCase : Optional[int] = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) _lowerCamelCase : Any = UNIT_SYMBOL.get(lowercase__ , lowercase__ ) if from_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Tuple = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) if to_sanitized not in METRIC_CONVERSION: _lowerCamelCase : Any = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) _lowerCamelCase : List[Any] = METRIC_CONVERSION[from_sanitized] _lowerCamelCase : int = METRIC_CONVERSION[to_sanitized] _lowerCamelCase : List[str] = 1 if from_exponent > to_exponent: _lowerCamelCase : List[str] = from_exponent - to_exponent else: _lowerCamelCase : List[Any] = -(to_exponent - from_exponent) return value * pow(10 , lowercase__ ) if __name__ == "__main__": from doctest import testmod testmod()
96
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase , lowercase ): super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase ) @torch.no_grad() def __call__( self , lowercase = 1 , lowercase = 100 , lowercase = None , lowercase = None , lowercase = True , ): if audio_length_in_s is None: _lowerCamelCase : Optional[Any] = self.unet.config.sample_size / self.unet.config.sample_rate _lowerCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate _lowerCamelCase : Optional[int] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) _lowerCamelCase : Union[str, Any] = int(lowercase ) if sample_size % down_scale_factor != 0: _lowerCamelCase : Dict = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' ' process.' ) _lowerCamelCase : Optional[int] = int(lowercase ) _lowerCamelCase : Any = next(iter(self.unet.parameters() ) ).dtype _lowerCamelCase : Any = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(lowercase )}, but requested an effective batch''' F''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) _lowerCamelCase : Union[str, Any] = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=lowercase ) # set step values self.scheduler.set_timesteps(lowercase , device=audio.device ) _lowerCamelCase : str = self.scheduler.timesteps.to(lowercase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _lowerCamelCase : Optional[int] = self.unet(lowercase , lowercase ).sample # 2. compute previous image: x_t -> t_t-1 _lowerCamelCase : Tuple = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample _lowerCamelCase : Optional[int] = audio.clamp(-1 , 1 ).float().cpu().numpy() _lowerCamelCase : Optional[Any] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase )
96
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{sampling_rate}''' _lowerCamelCase : str = '1' _lowerCamelCase : str = 'f32le' _lowerCamelCase : Union[str, Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _lowerCamelCase : List[Any] = output_stream[0] _lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): _lowerCamelCase : Optional[Any] = f'''{sampling_rate}''' _lowerCamelCase : List[str] = '1' if format_for_conversion == "s16le": _lowerCamelCase : List[str] = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) _lowerCamelCase : Dict = platform.system() if system == "Linux": _lowerCamelCase : Optional[int] = 'alsa' _lowerCamelCase : Optional[Any] = 'default' elif system == "Darwin": _lowerCamelCase : Optional[int] = 'avfoundation' _lowerCamelCase : Any = ':0' elif system == "Windows": _lowerCamelCase : Tuple = 'dshow' _lowerCamelCase : Tuple = 'default' _lowerCamelCase : Optional[int] = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: _lowerCamelCase : int = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": _lowerCamelCase : List[str] = np.intaa _lowerCamelCase : str = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Any = np.floataa _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: _lowerCamelCase : Union[str, Any] = chunk_length_s / 6 _lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): _lowerCamelCase : Any = [stride_length_s, stride_length_s] _lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : List[Any] = datetime.datetime.now() _lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ ) _lowerCamelCase : int = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _lowerCamelCase : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): _lowerCamelCase : int = B'' _lowerCamelCase, _lowerCamelCase : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) _lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: _lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride} if stream: _lowerCamelCase : List[Any] = False yield item _lowerCamelCase : Optional[Any] = stride_left _lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: _lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _lowerCamelCase : Tuple = False yield item def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: _lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
96
1
"""simple docstring""" import string import numpy def _snake_case ( lowercase__ , lowercase__ ): return b if a == 0 else greatest_common_divisor(b % a , lowercase__ ) class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) lowerCamelCase__ = numpy.vectorize(lambda lowercase : x % 36 ) lowerCamelCase__ = numpy.vectorize(lowercase ) def __init__( self , lowercase ): _lowerCamelCase : Optional[int] = self.modulus(lowercase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _lowerCamelCase : Dict = encrypt_key.shape[0] def A_ ( self , lowercase ): return self.key_string.index(lowercase ) def A_ ( self , lowercase ): return self.key_string[round(lowercase )] def A_ ( self ): _lowerCamelCase : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowerCamelCase : Dict = det % len(self.key_string ) _lowerCamelCase : List[Any] = len(self.key_string ) if greatest_common_divisor(lowercase , len(self.key_string ) ) != 1: _lowerCamelCase : List[Any] = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(lowercase ) def A_ ( self , lowercase ): _lowerCamelCase : int = [char for char in text.upper() if char in self.key_string] _lowerCamelCase : Optional[int] = chars[-1] while len(lowercase ) % self.break_key != 0: chars.append(lowercase ) return "".join(lowercase ) def A_ ( self , lowercase ): _lowerCamelCase : List[Any] = self.process_text(text.upper() ) _lowerCamelCase : Dict = '' for i in range(0 , len(lowercase ) - self.break_key + 1 , self.break_key ): _lowerCamelCase : Dict = text[i : i + self.break_key] _lowerCamelCase : Any = [self.replace_letters(lowercase ) for char in batch] _lowerCamelCase : Optional[Any] = numpy.array([vec] ).T 
_lowerCamelCase : int = self.modulus(self.encrypt_key.dot(lowercase ) ).T.tolist()[ 0 ] _lowerCamelCase : Tuple = ''.join( self.replace_digits(lowercase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def A_ ( self ): _lowerCamelCase : Tuple = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowerCamelCase : Dict = det % len(self.key_string ) _lowerCamelCase : Tuple = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _lowerCamelCase : Optional[int] = i break _lowerCamelCase : Any = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(lowercase ) ) def A_ ( self , lowercase ): _lowerCamelCase : Any = self.make_decrypt_key() _lowerCamelCase : List[Any] = self.process_text(text.upper() ) _lowerCamelCase : Any = '' for i in range(0 , len(lowercase ) - self.break_key + 1 , self.break_key ): _lowerCamelCase : Optional[Any] = text[i : i + self.break_key] _lowerCamelCase : Optional[Any] = [self.replace_letters(lowercase ) for char in batch] _lowerCamelCase : Union[str, Any] = numpy.array([vec] ).T _lowerCamelCase : List[Any] = self.modulus(decrypt_key.dot(lowercase ) ).T.tolist()[0] _lowerCamelCase : Tuple = ''.join( self.replace_digits(lowercase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def _snake_case ( ): _lowerCamelCase : Optional[int] = int(input('Enter the order of the encryption key: ' ) ) _lowerCamelCase : str = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(lowercase__ ): _lowerCamelCase : str = [int(lowercase__ ) for x in input().split()] hill_matrix.append(lowercase__ ) _lowerCamelCase : Optional[int] = HillCipher(numpy.array(lowercase__ ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) _lowerCamelCase : str = input('\n1. Encrypt\n2. 
Decrypt\n' ) if option == "1": _lowerCamelCase : Dict = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(lowercase__ ) ) elif option == "2": _lowerCamelCase : Any = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(lowercase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
96
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""} class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """ctrl""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : Dict = n_positions _lowerCamelCase : Optional[int] = n_embd _lowerCamelCase : str = n_layer _lowerCamelCase : Union[str, Any] = n_head _lowerCamelCase : Any = dff _lowerCamelCase : int = resid_pdrop _lowerCamelCase : Dict = embd_pdrop _lowerCamelCase : Union[str, Any] = layer_norm_epsilon _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : str = use_cache super().__init__(**lowercase )
96
1
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ): _lowerCamelCase : List[Any] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : List[str] = seq_length _lowerCamelCase : Dict = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : List[Any] = use_token_type_ids _lowerCamelCase : int = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : str = hidden_size _lowerCamelCase : Any = embedding_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : List[Any] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : List[str] = type_vocab_size 
_lowerCamelCase : List[Any] = type_sequence_label_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : str = num_labels _lowerCamelCase : int = num_choices _lowerCamelCase : Any = scope def A_ ( self ): _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : int = None if self.use_input_mask: _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[Any] = None if self.use_token_type_ids: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : Dict = None _lowerCamelCase : Any = None _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self ): return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Union[str, Any] = MobileBertModel(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[int] = model(lowercase , 
attention_mask=lowercase , token_type_ids=lowercase ) _lowerCamelCase : Union[str, Any] = model(lowercase , token_type_ids=lowercase ) _lowerCamelCase : List[str] = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Union[str, Any] = MobileBertForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : List[Any] = MobileBertForNextSentencePrediction(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : List[str] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : str = MobileBertForPreTraining(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : int = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Any = MobileBertForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() 
_lowerCamelCase : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = self.num_labels _lowerCamelCase : List[str] = MobileBertForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Dict = self.num_labels _lowerCamelCase : Optional[int] = MobileBertForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = self.num_choices _lowerCamelCase : Optional[int] = MobileBertForMultipleChoice(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : Tuple = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): _lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : Union[str, Any] = config_and_inputs _lowerCamelCase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase__ = ( { """feature-extraction""": MobileBertModel, """fill-mask""": MobileBertForMaskedLM, """question-answering""": MobileBertForQuestionAnswering, """text-classification""": MobileBertForSequenceClassification, """token-classification""": MobileBertForTokenClassification, """zero-shot""": MobileBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = True def A_ ( self , lowercase , lowercase , lowercase=False ): _lowerCamelCase : Optional[int] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) if return_labels: if model_class in get_values(lowercase ): _lowerCamelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase ) _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase ) return inputs_dict def A_ ( self ): _lowerCamelCase : int = MobileBertModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A_ ( self ): 
self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowercase ) def A_ ( self ): _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase ) def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase ) def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase ) def A_ ( self ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase ) def _snake_case ( lowercase__ ): return torch.tensor( lowercase__ , dtype=torch.long , device=lowercase__ , ) lowercase__ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Optional[int] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowercase ) _lowerCamelCase : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): _lowerCamelCase : Any = model(lowercase )[0] _lowerCamelCase : Tuple = 
torch.Size((1, 9, 512) ) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[Any] = torch.tensor( [ [ [-2.473_6526E07, 8.269_1656E04, 1.652_1838E05], [-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00], [2.604_7359E00, 1.567_7652E00, -1.732_4188E-01], ] ] , device=lowercase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE _lowerCamelCase : Any = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) _lowerCamelCase : str = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
96
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : Any = data _lowerCamelCase : Node | None = None class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : str = None _lowerCamelCase : str = None def __iter__( self ): _lowerCamelCase : List[str] = self.head while self.head: yield node.data _lowerCamelCase : Optional[int] = node.next if node == self.head: break def __len__( self ): return sum(1 for _ in self ) def __repr__( self ): return "->".join(str(lowercase ) for item in iter(self ) ) def A_ ( self , lowercase ): self.insert_nth(len(self ) , lowercase ) def A_ ( self , lowercase ): self.insert_nth(0 , lowercase ) def A_ ( self , lowercase , lowercase ): if index < 0 or index > len(self ): raise IndexError('list index out of range.' ) _lowerCamelCase : List[Any] = Node(lowercase ) if self.head is None: _lowerCamelCase : str = new_node # first node points itself _lowerCamelCase : Union[str, Any] = new_node elif index == 0: # insert at head _lowerCamelCase : List[str] = self.head _lowerCamelCase : str = new_node else: _lowerCamelCase : Union[str, Any] = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : Union[str, Any] = temp.next _lowerCamelCase : List[str] = new_node if index == len(self ) - 1: # insert at tail _lowerCamelCase : Any = new_node def A_ ( self ): return self.delete_nth(0 ) def A_ ( self ): return self.delete_nth(len(self ) - 1 ) def A_ ( self , lowercase = 0 ): if not 0 <= index < len(self ): raise IndexError('list index out of range.' 
) _lowerCamelCase : Any = self.head if self.head == self.tail: # just one node _lowerCamelCase : List[str] = None elif index == 0: # delete head node _lowerCamelCase : List[str] = self.tail.next.next _lowerCamelCase : Optional[int] = self.head.next else: _lowerCamelCase : Dict = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : int = temp.next _lowerCamelCase : Optional[int] = temp.next.next if index == len(self ) - 1: # delete at tail _lowerCamelCase : List[Any] = temp return delete_node.data def A_ ( self ): return len(self ) == 0 def _snake_case ( ): _lowerCamelCase : Union[str, Any] = CircularLinkedList() assert len(lowercase__ ) == 0 assert circular_linked_list.is_empty() is True assert str(lowercase__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(lowercase__ ) == i circular_linked_list.insert_nth(lowercase__ , i + 1 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(lowercase__ ) == 
"->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """speech_to_text_2""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , lowercase=10000 , lowercase=6 , lowercase=2048 , lowercase=4 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=2 , lowercase=True , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=1024 , **lowercase , ): _lowerCamelCase : Dict = vocab_size _lowerCamelCase : str = d_model _lowerCamelCase : List[Any] = decoder_ffn_dim _lowerCamelCase : Optional[int] = decoder_layers _lowerCamelCase : Union[str, Any] = decoder_attention_heads _lowerCamelCase : Dict = dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : List[str] = activation_dropout _lowerCamelCase : Optional[int] = activation_function _lowerCamelCase : Union[str, Any] = init_std _lowerCamelCase : Optional[Any] = decoder_layerdrop _lowerCamelCase : Optional[int] = use_cache _lowerCamelCase : Any = decoder_layers _lowerCamelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCamelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , **lowercase , )
96
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowercase__ = get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = """dummy_data""" lowerCamelCase__ = """datasets""" lowerCamelCase__ = False def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ): _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Dict = dataset_name _lowerCamelCase : Union[str, Any] = cache_dir _lowerCamelCase : Dict = use_local_dummy_data _lowerCamelCase : Tuple = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : str = str(lowercase ) # to be downloaded _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : int = None @property def A_ ( self ): if self._dummy_file is None: _lowerCamelCase : Tuple = self.download_dummy_data() return self._dummy_file @property def A_ ( self ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def A_ ( self ): return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def A_ ( self ): _lowerCamelCase : List[str] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : int = cached_path( lowercase , 
cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase ) return os.path.join(lowercase , self.dummy_file_name ) @property def A_ ( self ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def A_ ( self ): if self._bucket_url is None: _lowerCamelCase : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def A_ ( self ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def A_ ( self , lowercase , *lowercase ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Union[str, Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase , lowercase ): return self.create_dummy_data_dict(lowercase , lowercase ) elif isinstance(lowercase , (list, tuple) ): return self.create_dummy_data_list(lowercase , lowercase ) else: return self.create_dummy_data_single(lowercase , lowercase ) def A_ ( self , lowercase , *lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , *lowercase , **lowercase ): return path def A_ ( self ): return {} def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[int] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase , lowercase ): for single_url in single_urls: download_callback(lowercase ) else: _lowerCamelCase : List[Any] = single_urls download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url 
path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase , lowercase ): _lowerCamelCase : List[Any] = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls] else: _lowerCamelCase : Optional[int] = single_urls _lowerCamelCase : List[Any] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) _lowerCamelCase : int = value # make sure that values are unique if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase ) ) for url in data_url ) _lowerCamelCase : int = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : List[str] = [data_url[0]] * len(lowercase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : str = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(lowercase ) return dummy_data_list def A_ ( self , lowercase , lowercase ): for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus _lowerCamelCase : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(lowercase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def A_ ( self ): pass def A_ ( self ): pass def A_ ( self , lowercase ): def _iter_archive_members(lowercase ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : str = Path(self.dummy_file ).parent _lowerCamelCase : Union[str, Any] = path.relative_to(lowercase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : List[str] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase ) _lowerCamelCase : Optional[int] = Path(lowercase ) _lowerCamelCase : Dict = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(lowercase ).as_posix(), file_path.open('rb' ) def A_ ( self , lowercase ): if not isinstance(lowercase , lowercase ): _lowerCamelCase : List[str] = [paths] for path in paths: if os.path.isfile(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(lowercase ): if filename.startswith(('.', '__') ): continue yield os.path.join(lowercase , lowercase )
96
1
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__ ): try: with open(lowercase__ , 'rb' ) as flax_state_f: _lowerCamelCase : Union[str, Any] = from_bytes(lowercase__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(lowercase__ ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights _lowerCamelCase : Union[str, Any] = flatten_dict(jax.tree_util.tree_map(lambda lowercase__ : x.dtype == jnp.bfloataa , lowercase__ ) ).values() if any(lowercase__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' 
) _lowerCamelCase : Any = jax.tree_util.tree_map( lambda lowercase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase__ ) _lowerCamelCase : Tuple = '' _lowerCamelCase : Union[str, Any] = flatten_dict(lowercase__ , sep='.' ) _lowerCamelCase : List[Any] = pt_model.state_dict() # keep track of unexpected & missing keys _lowerCamelCase : str = [] _lowerCamelCase : List[str] = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _lowerCamelCase : int = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: _lowerCamelCase : str = flax_key_tuple_array[:-1] + ['weight'] _lowerCamelCase : Optional[Any] = jnp.transpose(lowercase__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": _lowerCamelCase : List[Any] = flax_key_tuple_array[:-1] + ['weight'] _lowerCamelCase : Tuple = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": _lowerCamelCase : str = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowercase__ ): _lowerCamelCase : List[str] = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) _lowerCamelCase : int = '.'.join(lowercase__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict _lowerCamelCase : str = np.asarray(lowercase__ ) if not isinstance(lowercase__ , np.ndarray ) else flax_tensor _lowerCamelCase : List[str] = torch.from_numpy(lowercase__ ) # remove from missing keys missing_keys.remove(lowercase__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowercase__ ) pt_model.load_state_dict(lowercase__ ) # re-transform missing_keys to list _lowerCamelCase : Dict = list(lowercase__ ) if len(lowercase__ ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(lowercase__ ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ' use it for predictions and inference.' ) return pt_model
96
"""simple docstring""" def _snake_case ( lowercase__ ): stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _lowerCamelCase, _lowerCamelCase : Optional[Any] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _lowerCamelCase : Union[str, Any] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
96
1
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int lowercase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ = None def _snake_case ( lowercase__ , lowercase__ , ): import pyspark def generate_fn(): _lowerCamelCase : Tuple = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: _lowerCamelCase : Optional[Any] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) _lowerCamelCase : int = partition_df.collect() _lowerCamelCase : List[Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self , lowercase , lowercase=None , ): _lowerCamelCase : Any = df _lowerCamelCase : List[Any] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowerCamelCase : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def A_ ( self , lowercase ): _lowerCamelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase ) return SparkExamplesIterable(self.df , partition_order=lowercase ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : str = self.split_shard_indices_by_worker(lowercase , lowercase ) return 
SparkExamplesIterable(self.df , partition_order=lowercase ) @property def A_ ( self ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): '''simple docstring''' lowerCamelCase__ = SparkConfig def __init__( self , lowercase , lowercase = None , lowercase = None , **lowercase , ): import pyspark _lowerCamelCase : Dict = pyspark.sql.SparkSession.builder.getOrCreate() _lowerCamelCase : List[Any] = df _lowerCamelCase : List[Any] = working_dir super().__init__( cache_dir=lowercase , config_name=str(self.df.semanticHash() ) , **lowercase , ) def A_ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(lowercase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase ) _lowerCamelCase : List[str] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowercase , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _lowerCamelCase : int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def A_ ( self ): return datasets.DatasetInfo(features=self.config.features ) def A_ ( self , lowercase ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def A_ ( self , lowercase ): import pyspark def get_arrow_batch_size(lowercase ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) _lowerCamelCase : int = self.df.count() _lowerCamelCase : Dict = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowerCamelCase : Optional[int] = ( self.df.limit(lowercase ) .repartition(1 ) .mapInArrow(lowercase , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowerCamelCase : Tuple = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowerCamelCase : Dict = min(lowercase , int(approx_total_size / max_shard_size ) ) _lowerCamelCase : List[str] = self.df.repartition(lowercase ) def A_ ( self , lowercase , lowercase , lowercase , ): import pyspark _lowerCamelCase : List[Any] = ParquetWriter if file_format == 'parquet' else ArrowWriter _lowerCamelCase : List[Any] = os.path.join(self._working_dir , os.path.basename(lowercase ) ) if self._working_dir else fpath _lowerCamelCase : Union[str, Any] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
_lowerCamelCase : Optional[int] = self.config.features _lowerCamelCase : Union[str, Any] = self._writer_batch_size _lowerCamelCase : int = self._fs.storage_options def write_arrow(lowercase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowerCamelCase : Dict = pyspark.TaskContext().taskAttemptId() _lowerCamelCase : List[Any] = next(lowercase , lowercase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = writer_class( features=lowercase , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase , storage_options=lowercase , embed_local_files=lowercase , ) _lowerCamelCase : int = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowerCamelCase, _lowerCamelCase : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 _lowerCamelCase : int = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase , storage_options=lowercase , embed_local_files=lowercase , ) _lowerCamelCase : Optional[int] = pa.Table.from_batches([batch] ) writer.write_table(lowercase ) if writer._num_bytes > 0: _lowerCamelCase, _lowerCamelCase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase ) ): _lowerCamelCase : Dict = 
os.path.join(os.path.dirname(lowercase ) , os.path.basename(lowercase ) ) shutil.move(lowercase , lowercase ) _lowerCamelCase : Any = ( self.df.mapInArrow(lowercase , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def A_ ( self , lowercase , lowercase = "arrow" , lowercase = None , lowercase = None , **lowercase , ): self._validate_cache_dir() _lowerCamelCase : Optional[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase ) _lowerCamelCase : Optional[Any] = not is_remote_filesystem(self._fs ) _lowerCamelCase : int = os.path.join if is_local else posixpath.join _lowerCamelCase : Optional[Any] = '-TTTTT-SSSSS-of-NNNNN' _lowerCamelCase : Any = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _lowerCamelCase : Any = path_join(self._output_dir , lowercase ) _lowerCamelCase : Dict = 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : Dict = 0 _lowerCamelCase : int = [] _lowerCamelCase : List[Any] = [] for task_id, content in self._prepare_split_single(lowercase , lowercase , lowercase ): ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : str = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase ) _lowerCamelCase : int = total_num_examples _lowerCamelCase : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(F'''Renaming {total_shards} 
shards.''' ) if total_shards > 1: _lowerCamelCase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowerCamelCase : Optional[int] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase , lowercase , lowercase , ): rename( lowercase , fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , F'''{global_shard_id:05d}''' ).replace('NNNNN' , F'''{total_shards:05d}''' ) , ) _lowerCamelCase : List[Any] = [] _lowerCamelCase : Union[str, Any] = 0 for i in range(len(lowercase ) ): _lowerCamelCase, _lowerCamelCase : List[str] = task_id_and_num_shards[i] for shard_id in range(lowercase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase , len(lowercase ) ).map(lambda lowercase : _rename_shard(*lowercase ) ).collect() else: # don't use any pattern _lowerCamelCase : Dict = 0 _lowerCamelCase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace(lowercase , '' ) , ) def A_ ( self , lowercase , ): return SparkExamplesIterable(self.df )
96
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = """AutoTokenizer""" def __init__( self , lowercase , lowercase , lowercase ): super().__init__(lowercase , lowercase ) # add QFormer tokenizer _lowerCamelCase : int = qformer_tokenizer def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify at least images or text.' 
) _lowerCamelCase : int = BatchFeature() if text is not None: _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) encoding.update(lowercase ) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' ) _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' ) if images is not None: _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase ) encoding.update(lowercase ) return encoding def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A_ ( self , lowercase , **lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path 
({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowercase ) return super().save_pretrained(lowercase , **lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' ) _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase ) args.append(lowercase ) return cls(*lowercase )
96
1
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): return int((input_a, input_a).count(0 ) == 0 ) def _snake_case ( ): assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
96
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowercase__ = logging.getLogger() def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = {} _lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' ) if os.path.exists(lowercase__ ): with open(lowercase__ , 'r' ) as f: _lowerCamelCase : List[Any] = json.load(lowercase__ ) else: raise ValueError(f'''can\'t find {path}''' ) return results lowercase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): import xla_spawn _lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : List[Any] = F''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(lowercase , 'argv' , lowercase ): _lowerCamelCase : Dict = time() xla_spawn.main() _lowerCamelCase : Any = time() _lowerCamelCase : Optional[int] = get_results(lowercase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def A_ ( self ): import xla_spawn _lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(lowercase , 'argv' , lowercase ): xla_spawn.main()
96
1
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : List[str] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : str = 0 while b > 0: if b & 1: _lowerCamelCase : List[Any] = ((res % c) + (a % c)) % c a += a b >>= 1 return res
96
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _snake_case ( lowercase__ , lowercase__ ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) ) def _snake_case ( lowercase__ , lowercase__ ): if dataset.ndim != value_array.ndim: _lowerCamelCase : Tuple = ( 'Wrong input data\'s dimensions... ' f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(lowercase__ ) try: if dataset.shape[1] != value_array.shape[1]: _lowerCamelCase : Optional[int] = ( 'Wrong input data\'s shape... ' f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(lowercase__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: _lowerCamelCase : int = ( 'Input data have different datatype... ' f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(lowercase__ ) _lowerCamelCase : Optional[int] = [] for value in value_array: _lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] ) _lowerCamelCase : Union[str, Any] = dataset[0].tolist() for dataset_value in dataset[1:]: _lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ ) if dist > temp_dist: _lowerCamelCase : List[Any] = temp_dist _lowerCamelCase : List[str] = dataset_value.tolist() answer.append([vector, dist] ) return answer def _snake_case ( lowercase__ , lowercase__ ): return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ )) if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" from manim import * class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : List[str] = Rectangle(height=0.5 , width=0.5 ) _lowerCamelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _lowerCamelCase : int = [mem.copy() for i in range(6 )] _lowerCamelCase : List[str] = [mem.copy() for i in range(6 )] _lowerCamelCase : int = VGroup(*lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : List[str] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : Optional[Any] = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : str = Text('CPU' , font_size=24 ) _lowerCamelCase : Any = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase ) _lowerCamelCase : Dict = [mem.copy() for i in range(4 )] _lowerCamelCase : Tuple = VGroup(*lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : Union[str, Any] = Text('GPU' , font_size=24 ) _lowerCamelCase : List[Any] = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) gpu.move_to([-1, -1, 0] ) self.add(lowercase ) _lowerCamelCase : List[Any] = [mem.copy() for i in range(6 )] _lowerCamelCase : Any = VGroup(*lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : Union[str, Any] = Text('Model' , font_size=24 ) _lowerCamelCase : Tuple = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) model.move_to([3, -1.0, 0] ) self.add(lowercase ) _lowerCamelCase : Tuple = [] for i, rect in enumerate(lowercase ): rect.set_stroke(lowercase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _lowerCamelCase : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , 
direction=lowercase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase , buff=0.0 ) self.add(lowercase ) cpu_targs.append(lowercase ) _lowerCamelCase : List[str] = [mem.copy() for i in range(6 )] _lowerCamelCase : Optional[Any] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) _lowerCamelCase : Any = Text('Loaded Checkpoint' , font_size=24 ) _lowerCamelCase : Any = Group(lowercase , lowercase ).arrange(lowercase , aligned_edge=lowercase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _lowerCamelCase : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _lowerCamelCase : Any = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase , lowercase ) _lowerCamelCase : Optional[Any] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) _lowerCamelCase : Optional[int] = MarkupText( F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase ) , Write(lowercase ) ) self.play(Write(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) ) _lowerCamelCase : List[str] = [] _lowerCamelCase : Any = [] for i, rect in enumerate(lowercase ): _lowerCamelCase : str = fill.copy().set_fill(lowercase , opacity=0.7 ) target.move_to(lowercase ) first_animations.append(GrowFromCenter(lowercase , run_time=1 ) ) _lowerCamelCase : Union[str, Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase , run_time=1.5 ) ) 
self.play(*lowercase ) self.play(*lowercase ) self.wait()
96
"""simple docstring""" import socket def _snake_case ( ): _lowerCamelCase : List[Any] = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) _lowerCamelCase : Union[str, Any] = socket.gethostname() _lowerCamelCase : List[Any] = 12312 sock.connect((host, port) ) sock.send(B'Hello server!' ) with open('Received_file' , 'wb' ) as out_file: print('File opened' ) print('Receiving data...' ) while True: _lowerCamelCase : int = sock.recv(1024 ) if not data: break out_file.write(lowercase__ ) print('Successfully received the file' ) sock.close() print('Connection closed' ) if __name__ == "__main__": main()
96
1
"""Plot benchmark results (time or memory vs. batch size / sequence length).

Reads a csv file with columns (model, batch_size, sequence_length, result)
and renders one scatter/line series per model and inner-loop value.
"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    """Dataclass field whose default is produced by a factory (needed for list defaults)."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    """Command-line arguments controlling what is plotted and how."""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    """Return True if `string` parses as an int."""
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    """Return True if `string` parses as a float."""
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    """Aggregates csv rows per model and renders the plot."""

    def __init__(self, args):
        self.args = args
        # model name -> {"bsz": [...], "seq_len": [...], "result": {(bsz, seq_len): value}}
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        """Draw one scatter + dashed-line series per (model, inner-loop value)."""
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                # keep x and y the same length in case some (bsz, seq_len) pairs are missing
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    """Parse PlotArguments from the command line and render the plot."""
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
96
"""SuperGLUE benchmark metric for the `datasets` library."""
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the reference labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Accuracy plus F1 (averaging mode `f1_avg`, e.g. "binary" or "macro")."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC metrics: per-question exact match, per-question macro-F1, and answer-level F1."""
    question_map = {}
    # group (prediction, label) pairs by their paragraph-question id
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # exact match requires every answer of the question to be correct
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """SuperGLUE metric; the behavior depends on `self.config_name` (the subset)."""

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema; 'record' and 'multirc' use structured inputs, all other subsets use int labels."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
96
1
"""Processor class for InstructBLIP.

Combines a BLIP image processor, an LLM tokenizer, and a Q-Former tokenizer;
the Q-Former tokenizer is saved/loaded from a `qformer_tokenizer` subfolder.
"""
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIP processor which wraps a BLIP image processor, an LLM tokenizer and a Q-Former tokenizer
    into a single processor.

    Args:
        image_processor (`BlipImageProcessor`):
            The image processor, a required input.
        tokenizer (`AutoTokenizer`):
            The tokenizer of the language model, a required input.
        qformer_tokenizer (`AutoTokenizer`):
            The tokenizer of the Q-Former, a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize `text` with both tokenizers and preprocess `images`; returns one merged `BatchFeature`."""
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # store the Q-Former outputs under dedicated keys so they do not
            # clash with the main tokenizer's input_ids / attention_mask
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all args to the main tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all args to the main tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys keeps order while removing duplicates
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from its separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
96
"""Fast and slow tests for the DDIM unconditional image-generation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler so the fast tests stay cheap."""
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
96
1
"""Fast and slow tests for the DDIM unconditional image-generation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler so the fast tests stay cheap."""
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
96
"""Vegetation-index calculator over red/green/blue/red-edge/NIR channel arrays."""
# Imports
import numpy as np


class lowerCAmelCase__:
    """Computes spectral vegetation indices (NDVI, EVI, SAVI family, etc.).

    Channels may be scalars or numpy arrays of matching shape; all index
    methods operate elementwise on whatever was stored by `set_matricies`.
    NOTE(review): class name kept as-is to preserve the existing interface.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store any channels that were provided; leave the others untouched."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch on `index` name; returns the computed index or False if unknown."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted Transformed Soil-Adjusted VI."""
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index - Green."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index - RedEdge."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted VI."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue of the RGB channels."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index with intercept `b` and slope `a`."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity of the RGB channels."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green channel."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR channel."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red channel."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation of the RGB channels."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape (IF) index."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple NIR/red ratio vegetation index."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
96
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowercase__ = """pt""" elif is_tf_available(): lowercase__ = """tf""" else: lowercase__ = """jax""" class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PerceiverTokenizer lowerCamelCase__ = False def A_ ( self ): super().setUp() _lowerCamelCase : Optional[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A_ ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def A_ ( self , **lowercase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A_ ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. 
_lowerCamelCase : List[Any] = [] for i in range(len(lowercase ) ): try: _lowerCamelCase : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowerCamelCase : Optional[int] = list(filter(lambda lowercase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , lowercase ) ) _lowerCamelCase : Any = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: _lowerCamelCase : List[str] = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: _lowerCamelCase : int = toks + toks # toks_str = [t[1] for t in toks] _lowerCamelCase : str = [t[0] for t in toks] # Ensure consistency _lowerCamelCase : Dict = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: _lowerCamelCase : str = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: _lowerCamelCase : Optional[Any] = ' ' + output_txt _lowerCamelCase : Tuple = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A_ ( self ): _lowerCamelCase : Optional[Any] = self.perceiver_tokenizer _lowerCamelCase : Dict = 'Unicode €.' 
_lowerCamelCase : int = tokenizer(lowercase ) _lowerCamelCase : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] , lowercase ) # decoding _lowerCamelCase : Optional[int] = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' ) _lowerCamelCase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowerCamelCase : Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] , lowercase ) # decoding _lowerCamelCase : int = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.perceiver_tokenizer _lowerCamelCase : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowerCamelCase : List[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on _lowerCamelCase : Dict = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": _lowerCamelCase : Optional[Any] = list(batch.input_ids.numpy()[0] ) else: _lowerCamelCase : Union[str, Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A_ ( self ): _lowerCamelCase : List[Any] = self.perceiver_tokenizer _lowerCamelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowerCamelCase : List[str] = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no 
decoder_input_ids self.assertIn('input_ids' , lowercase ) self.assertIn('attention_mask' , lowercase ) self.assertNotIn('decoder_input_ids' , lowercase ) self.assertNotIn('decoder_attention_mask' , lowercase ) def A_ ( self ): _lowerCamelCase : str = self.perceiver_tokenizer _lowerCamelCase : Optional[int] = [ 'Summary of the text.', 'Another summary.', ] _lowerCamelCase : Optional[int] = tokenizer( text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def A_ ( self ): # safety check on max_len default value so we are sure the test works _lowerCamelCase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _lowerCamelCase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowerCamelCase : Optional[Any] = tempfile.mkdtemp() _lowerCamelCase : List[str] = ' He is very happy, UNwant\u00E9d,running' _lowerCamelCase : List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) _lowerCamelCase : Any = tokenizer.__class__.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) _lowerCamelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) 
_lowerCamelCase : Dict = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowerCamelCase : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) _lowerCamelCase : str = tokenizer.__class__.from_pretrained(lowercase ) _lowerCamelCase : Any = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _lowerCamelCase : List[Any] = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowerCamelCase : Tuple = json.load(lowercase ) with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowerCamelCase : List[str] = json.load(lowercase ) _lowerCamelCase : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )] _lowerCamelCase : List[Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowerCamelCase : Tuple = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowercase , lowercase ) with 
open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowerCamelCase : Optional[int] = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowerCamelCase : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )] _lowerCamelCase : Any = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '�' ) def A_ ( self ): pass def A_ ( self ): pass def A_ ( self ): pass def A_ ( self ): pass def A_ ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowerCamelCase : List[Any] = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowerCamelCase : Dict = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', 
'[SEP]'] _lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
96
"""Image encoder and mapper used by the Paint-by-Example pipeline."""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision backbone + transformer mapper that projects an example image
    into the conditioning space expected by the UNet.

    Fix: the obfuscated revision named both classes identically and inherited
    from an undefined name; the real names are restored (the mapper is
    referenced by name inside ``__init__``).
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode ``pixel_values``; optionally also return the learned
        unconditional embedding used for classifier-free guidance scaling."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # add a sequence dimension before the transformer mapper
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    """Stack of basic transformer blocks applied to the pooled CLIP embedding."""

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
96
1
"""BB84 quantum key distribution simulated with Qiskit."""
import numpy as np
import qiskit


def bbaa(key_len=8, seed=None):
    """Simulate the BB84 protocol and return a classical key of ``key_len`` bits.

    Args:
        key_len: desired length of the generated key (in bits).
        seed: optional integer seed for both NumPy and the Qiskit simulator.

    Fix: the obfuscated signature declared two parameters with the same name
    (a SyntaxError); the call site ``bbaa(8, seed=0)`` fixes the real names.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod

    testmod()
96
"""Convert between metric length units (meter through yottametre)."""

# Maps full unit names to their SI symbols.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value, from_type, to_type):
    """Convert ``value`` from ``from_type`` to ``to_type``.

    Units may be given as full names (singular or plural, any case) or as SI
    symbols. Raises ``ValueError`` for unknown units.

    Fix: the obfuscated revision gave both module dictionaries the same name
    (the second clobbered the first) while the body reads ``UNIT_SYMBOL`` and
    ``METRIC_CONVERSION``; the distinct constant names are restored.
    """
    # Accept plural spellings by stripping a trailing 's'.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full names to symbols; symbols pass through unchanged (default=self).
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
96
1
"""Reverse the word order of a string."""


def reverse_words(input_str: str) -> str:
    """Return ``input_str`` with its whitespace-separated words in reverse order.

    >>> reverse_words("I love Python")
    'Python love I'

    Fix: the obfuscated signature renamed the parameter while the body still
    read ``input_str``, which made every call raise ``NameError``.
    """
    return " ".join(input_str.split()[::-1])


# Backward-compatible alias for the previous (generated) function name.
_snake_case = reverse_words


if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
"""Lazy-import shim for the ViT-MSN model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: the obfuscated revision collapsed ``_import_structure`` and the
# torch-only list into one rebound name, so the ``_LazyModule`` call below
# referenced an undefined variable. The standard transformers layout is
# restored: a dict mapping submodule -> exported names, extended in place
# when torch is available.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
96
1
"""Checks on arithmetic series: membership test and mean."""


def is_arithmetic_series(series):
    """Return True if ``series`` has a constant difference between neighbours.

    Raises ``ValueError`` for non-list input or an empty list.

    Fix: the obfuscated revision named both functions identically (the second
    shadowed the first) and collapsed ``isinstance(series, list)`` into
    ``isinstance(x, x)``, which raises ``TypeError``.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    """Return the arithmetic mean of ``series``.

    Raises ``ValueError`` for non-list input or an empty list.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
"""Numerical integration via the extended trapezoidal rule."""


def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    """Approximate the integral of ``f`` over ``boundary = [a, b]`` in ``steps`` slices.

    Fix: all four functions in this file were renamed to the same identifier
    by the obfuscation pass; the bodies call ``make_points`` and ``f`` by
    name, so the original names are restored.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h.

    NOTE(review): the ``< (b - h)`` bound can skip the last interior point
    (b - h itself) for exact arithmetic — preserved as-is since changing it
    alters the numerical result; confirm intent before "fixing".
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate x**2 over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
96
1
"""Interpolation search over an ascending-sorted collection."""


def interpolation_search(sorted_collection, item):
    """Return the index of ``item`` in ``sorted_collection`` or ``None``.

    The collection must be sorted ascending; the probe position is
    interpolated from the value range instead of bisected.

    Fix: the obfuscated signatures declared duplicate parameter names
    (a SyntaxError); the bodies still read ``sorted_collection``/``item``,
    which fixes the real names.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of :func:`interpolation_search` over [left, right]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # probe undershot the window: recurse on [point, left]
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        # probe overshot the window: recurse on [right, point]
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    """Raise ``ValueError`` unless ``collection`` is ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # Fix: ``collection`` is now defined unconditionally — previously it was
    # only bound inside the ``debug == 1`` branch, so the default path raised
    # NameError at the search call below.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
96
"""Two perfect-square tests: float sqrt and integer binary search."""
import math


def perfect_square(num):
    """Return True if ``num`` is a perfect square (float-sqrt based check).

    Fix: both functions in this file were renamed to the same identifier by
    the obfuscation pass, so the second definition shadowed the first;
    distinct names are restored.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n):
    """Return True if ``n`` is a perfect square, using integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
1
"""Lazy-import shim for the Autoformer model family."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Fix: the obfuscated revision collapsed ``_import_structure`` and the
# torch-only list into one rebound name, so the ``_LazyModule`` call below
# referenced an undefined variable. The standard transformers layout is
# restored.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
96
"""Word-break: can a string be segmented into dictionary words?"""
import functools
from typing import Any


def word_break(string, words):
    """Return True if ``string`` can be split into a sequence of ``words``.

    Builds a trie over ``words`` and memoizes a recursive "breakable from
    index i" predicate.

    Raises:
        ValueError: if ``string`` is not a non-empty str, or ``words`` is not
            a list of non-empty strings.

    Fix: the obfuscated signature declared two parameters with the same name
    (a SyntaxError); the body reads ``string`` and ``words``, which fixes the
    real names.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"  # marks a node that terminates a word

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
1
"""Value-function-guided diffusion planner for offline RL environments."""
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    """Diffusion pipeline that denoises trajectories while nudging them toward
    high value-function estimates, then returns the first action of the best
    trajectory.

    Fix: the obfuscated revision inherited from an undefined name and erased
    the class name; the ``DiffusionPipeline`` base (imported above) and the
    descriptive class name are restored.
    """

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalization statistics; keys whose values lack
        # mean()/std() are silently skipped (deliberate best-effort).
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        """Standardize ``x_in`` using the dataset statistics for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        """Invert :meth:`normalize` for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively move/convert ``x_in`` to a tensor on the UNet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        """Overwrite the state part of conditioned timesteps with their targets."""
        for key, val in cond.items():
            # NOTE(review): reconstructed as the standard diffusers indexed
            # write ``x_in[:, key, act_dim:] = val.clone()`` — the obfuscated
            # source collapsed the assignment target; confirm against upstream.
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise ``x`` over all scheduler timesteps with value guidance.

        Returns the final trajectories and the last value estimates ``y``.
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # NOTE(review): reconstructed as masking guidance near t=0
                # (``grad[timesteps < 2] = 0``) per upstream diffusers; the
                # obfuscated source collapsed the assignment target.
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        """Plan from observation ``obs`` and return the chosen first action."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
96
"""Checks on arithmetic series: membership test and mean."""


def is_arithmetic_series(series):
    """Return True if ``series`` has a constant difference between neighbours.

    Raises ``ValueError`` for non-list input or an empty list.

    Fix: the obfuscated revision named both functions identically (the second
    shadowed the first) and collapsed ``isinstance(series, list)`` into
    ``isinstance(x, x)``, which raises ``TypeError``.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    """Return the arithmetic mean of ``series``.

    Raises ``ValueError`` for non-list input or an empty list.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
96
1
"""`transformers run` CLI subcommand: feed a data file through a pipeline."""
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    """Infer a :class:`PipelineDataFormat` name from ``path``'s extension.

    An empty/None path means stdin/stdout ("pipe"). Raises for an
    unrecognized extension.
    """
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    """Build a :class:`RunCommand` from parsed CLI ``args``.

    Fix: the obfuscated revision erased the factory/class names while the
    body still constructs ``RunCommand`` and calls
    ``try_infer_format_from_ext``; the names are restored.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    """Runs every record of a data file through a pipeline and saves results."""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``run`` sub-parser and all its options to ``parser``."""
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        """Iterate the reader, run the pipeline on each entry, save outputs."""
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
96
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase__ = 16 lowercase__ = 32 def _snake_case ( lowercase__ , lowercase__ = 16 , lowercase__ = "bert-base-cased" ): _lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ ) _lowerCamelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowercase__ ): # max_length=None => use the model max length (it's actually the default) _lowerCamelCase : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCamelCase : int = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCamelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
_lowerCamelCase : List[str] = DataLoader( tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) _lowerCamelCase : int = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ , lowercase__ ): # Initialize accelerator _lowerCamelCase : Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCamelCase : Optional[int] = config['lr'] _lowerCamelCase : Optional[int] = int(config['num_epochs'] ) _lowerCamelCase : Union[str, Any] = int(config['seed'] ) _lowerCamelCase : Optional[int] = int(config['batch_size'] ) _lowerCamelCase : Dict = args.model_name_or_path set_seed(lowercase__ ) _lowerCamelCase, _lowerCamelCase : Optional[int] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer _lowerCamelCase : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCamelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: _lowerCamelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _lowerCamelCase : Tuple = 1 _lowerCamelCase : List[Any] = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCamelCase : Tuple = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , 
num_training_steps=lowercase__ , ) else: _lowerCamelCase : Any = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over _lowerCamelCase : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCamelCase : Dict = 0 # Now we train the model _lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : str = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): _lowerCamelCase : List[Any] = model(**lowercase__ ) _lowerCamelCase : int = outputs.loss _lowerCamelCase : Dict = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _lowerCamelCase : Union[str, Any] = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**lowercase__ ) _lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCamelCase, _lowerCamelCase : List[str] = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: _lowerCamelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCamelCase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) _lowerCamelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowercase__ ) _lowerCamelCase : Tuple = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: _lowerCamelCase : str = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ): _lowerCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=lowercase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=lowercase__ , ) parser.add_argument( '--output_dir' , type=lowercase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' 
, ) parser.add_argument( '--performance_lower_bound' , type=lowercase__ , default=lowercase__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=lowercase__ , default=3 , help='Number of train epochs.' , ) _lowerCamelCase : Optional[Any] = parser.parse_args() _lowerCamelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
96
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """new-model""" if is_tf_available(): class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): 
_lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): _lowerCamelCase : List[str] = 'bert-base-cased' _lowerCamelCase : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = 
TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def A_ ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow @require_tensorflow_probability def A_ ( self ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase ) _lowerCamelCase, _lowerCamelCase : List[Any] = 
TFAutoModelForTableQuestionAnswering.from_pretrained( lowercase , output_loading_info=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) def A_ ( self ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowercase , lowercase ) _lowerCamelCase : Optional[int] = copy.deepcopy(model.config ) _lowerCamelCase : Dict = ['FunnelBaseModel'] _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): try: AutoConfig.register('new-model' , lowercase ) _lowerCamelCase : Tuple = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) auto_class.register(lowercase , lowercase ) # Trying to register something existing in the Transformers library will raise an error with 
self.assertRaises(lowercase ): auto_class.register(lowercase , lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config() _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() ) _lowerCamelCase : int = auto_class.from_config(lowercase ) self.assertIsInstance(lowercase , lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase ) _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def A_ ( self ): with self.assertRaisesRegex( lowercase , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def A_ ( self ): with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ): _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def A_ ( self ): # Make sure we have cached the model. 
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
96
1
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def _snake_case ( lowercase__ ): # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def _snake_case ( ): with parallel_backend('spark' ): assert ParallelBackendConfig.backend_name == "spark" _lowerCamelCase : List[str] = [1, 2, 3] with pytest.raises(lowercase__ ): with parallel_backend('unsupported backend' ): map_nested(lowercase__ , lowercase__ , num_proc=2 ) with pytest.raises(lowercase__ ): with parallel_backend('unsupported backend' ): map_nested(lowercase__ , lowercase__ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('num_proc' , [2, -1] ) def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[Any] = [1, 2] _lowerCamelCase : Any = {'a': 1, 'b': 2} _lowerCamelCase : int = {'a': [1, 2], 'b': [3, 4]} _lowerCamelCase : List[Any] = {'a': {'1': 1}, 'b': 2} _lowerCamelCase : List[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4} _lowerCamelCase : Any = [2, 3] _lowerCamelCase : str = {'a': 2, 'b': 3} _lowerCamelCase : Dict = {'a': [2, 3], 'b': [4, 5]} _lowerCamelCase : List[str] = {'a': {'1': 2}, 'b': 3} _lowerCamelCase : Optional[Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5} with parallel_backend('spark' ): assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
96
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
1
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig lowercase__ = logging.get_logger(__name__) lowercase__ = """T5Config""" def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Union[str, Any] = jnp.zeros_like(lowercase__ ) _lowerCamelCase : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) _lowerCamelCase : List[str] = shifted_input_ids.at[:, 0].set(lowercase__ ) _lowerCamelCase : Dict = jnp.where(shifted_input_ids == -100 , lowercase__ , lowercase__ ) return shifted_input_ids class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """mt5""" lowerCamelCase__ = MTaConfig class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """mt5""" lowerCamelCase__ = MTaConfig class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """mt5""" lowerCamelCase__ = MTaConfig
96
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{sampling_rate}''' _lowerCamelCase : str = '1' _lowerCamelCase : str = 'f32le' _lowerCamelCase : Union[str, Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _lowerCamelCase : List[Any] = output_stream[0] _lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): _lowerCamelCase : Optional[Any] = f'''{sampling_rate}''' _lowerCamelCase : List[str] = '1' if format_for_conversion == "s16le": _lowerCamelCase : List[str] = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) _lowerCamelCase : Dict = platform.system() if system == "Linux": _lowerCamelCase : Optional[int] = 'alsa' _lowerCamelCase : Optional[Any] = 'default' elif system == "Darwin": _lowerCamelCase : Optional[int] = 'avfoundation' _lowerCamelCase : Any = ':0' elif system == "Windows": _lowerCamelCase : Tuple = 'dshow' _lowerCamelCase : Tuple = 'default' _lowerCamelCase : Optional[int] = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: _lowerCamelCase : int = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": _lowerCamelCase : List[str] = np.intaa _lowerCamelCase : str = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Any = np.floataa _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: _lowerCamelCase : Union[str, Any] = chunk_length_s / 6 _lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): _lowerCamelCase : Any = [stride_length_s, stride_length_s] _lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : List[Any] = datetime.datetime.now() _lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ ) _lowerCamelCase : int = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _lowerCamelCase : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): _lowerCamelCase : int = B'' _lowerCamelCase, _lowerCamelCase : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) _lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: _lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride} if stream: _lowerCamelCase : List[Any] = False yield item _lowerCamelCase : Optional[Any] = stride_left _lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: _lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _lowerCamelCase : Tuple = False yield item def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: _lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
96
1
"""simple docstring""" import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( 'kwargs, expected' , [ ({'num_shards': 0, 'max_num_jobs': 1}, []), ({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]), ({'num_shards': 10, 'max_num_jobs': 10}, [range(lowercase__ , i + 1 ) for i in range(10 )]), ({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]), ({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = _distribute_shards(**lowercase__ ) assert out == expected @pytest.mark.parametrize( 'gen_kwargs, max_num_jobs, expected' , [ ({'foo': 0}, 10, [{'foo': 0}]), ({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]), ({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]), ({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]), ({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]), ] , ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : int = _split_gen_kwargs(lowercase__ , lowercase__ ) assert out == expected @pytest.mark.parametrize( 'gen_kwargs, expected' , [ ({'foo': 0}, 1), ({'shards': [0]}, 1), ({'shards': [0, 1, 2, 3]}, 4), ({'shards': [0, 1, 2, 3], 'foo': 0}, 4), ({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4), ({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError), ] , ) def _snake_case ( lowercase__ , lowercase__ ): if expected is RuntimeError: with pytest.raises(lowercase__ ): _number_of_shards_in_gen_kwargs(lowercase__ ) else: _lowerCamelCase : Tuple = _number_of_shards_in_gen_kwargs(lowercase__ ) assert out == expected
96
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""} class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """ctrl""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : Dict = n_positions _lowerCamelCase : Optional[int] = n_embd _lowerCamelCase : str = n_layer _lowerCamelCase : Union[str, Any] = n_head _lowerCamelCase : Any = dff _lowerCamelCase : int = resid_pdrop _lowerCamelCase : Dict = embd_pdrop _lowerCamelCase : Union[str, Any] = layer_norm_epsilon _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : str = use_cache super().__init__(**lowercase )
96
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """t5""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , lowercase=32128 , lowercase=512 , lowercase=64 , lowercase=2048 , lowercase=6 , lowercase=None , lowercase=8 , lowercase=32 , lowercase=128 , lowercase=0.1 , lowercase=1E-6 , lowercase=1.0 , lowercase="relu" , lowercase=True , lowercase=True , lowercase=0 , lowercase=1 , **lowercase , ): _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Any = d_model _lowerCamelCase : List[str] = d_kv _lowerCamelCase : Optional[int] = d_ff _lowerCamelCase : Union[str, Any] = num_layers _lowerCamelCase : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _lowerCamelCase : Union[str, Any] = num_heads _lowerCamelCase : Any = relative_attention_num_buckets _lowerCamelCase : Tuple = relative_attention_max_distance _lowerCamelCase : List[str] = dropout_rate _lowerCamelCase : Optional[Any] = layer_norm_epsilon _lowerCamelCase : Dict = initializer_factor _lowerCamelCase : Dict = feed_forward_proj _lowerCamelCase : Union[str, Any] = use_cache _lowerCamelCase : Tuple = self.feed_forward_proj.split('-' ) _lowerCamelCase : 
Tuple = act_info[-1] _lowerCamelCase : str = act_info[0] == 'gated' if len(lowercase ) > 1 and act_info[0] != "gated" or len(lowercase ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _lowerCamelCase : int = 'gelu_new' super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , **lowercase , ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @property def A_ ( self ): _lowerCamelCase : Any = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: _lowerCamelCase : int = 'past_encoder_sequence + sequence' _lowerCamelCase : Union[str, Any] = {0: 'batch'} _lowerCamelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _lowerCamelCase : Tuple = {0: 'batch', 1: 'decoder_sequence'} _lowerCamelCase : Dict = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='inputs' ) return common_inputs @property def A_ ( self ): return 13
96
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : Any = data _lowerCamelCase : Node | None = None class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : str = None _lowerCamelCase : str = None def __iter__( self ): _lowerCamelCase : List[str] = self.head while self.head: yield node.data _lowerCamelCase : Optional[int] = node.next if node == self.head: break def __len__( self ): return sum(1 for _ in self ) def __repr__( self ): return "->".join(str(lowercase ) for item in iter(self ) ) def A_ ( self , lowercase ): self.insert_nth(len(self ) , lowercase ) def A_ ( self , lowercase ): self.insert_nth(0 , lowercase ) def A_ ( self , lowercase , lowercase ): if index < 0 or index > len(self ): raise IndexError('list index out of range.' ) _lowerCamelCase : List[Any] = Node(lowercase ) if self.head is None: _lowerCamelCase : str = new_node # first node points itself _lowerCamelCase : Union[str, Any] = new_node elif index == 0: # insert at head _lowerCamelCase : List[str] = self.head _lowerCamelCase : str = new_node else: _lowerCamelCase : Union[str, Any] = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : Union[str, Any] = temp.next _lowerCamelCase : List[str] = new_node if index == len(self ) - 1: # insert at tail _lowerCamelCase : Any = new_node def A_ ( self ): return self.delete_nth(0 ) def A_ ( self ): return self.delete_nth(len(self ) - 1 ) def A_ ( self , lowercase = 0 ): if not 0 <= index < len(self ): raise IndexError('list index out of range.' 
) _lowerCamelCase : Any = self.head if self.head == self.tail: # just one node _lowerCamelCase : List[str] = None elif index == 0: # delete head node _lowerCamelCase : List[str] = self.tail.next.next _lowerCamelCase : Optional[int] = self.head.next else: _lowerCamelCase : Dict = self.head for _ in range(index - 1 ): _lowerCamelCase : List[Any] = temp.next _lowerCamelCase : int = temp.next _lowerCamelCase : Optional[int] = temp.next.next if index == len(self ) - 1: # delete at tail _lowerCamelCase : List[Any] = temp return delete_node.data def A_ ( self ): return len(self ) == 0 def _snake_case ( ): _lowerCamelCase : Union[str, Any] = CircularLinkedList() assert len(lowercase__ ) == 0 assert circular_linked_list.is_empty() is True assert str(lowercase__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(lowercase__ ) == i circular_linked_list.insert_nth(lowercase__ , i + 1 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(lowercase__ ) == 
"->".join(str(lowercase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""simple docstring""" lowercase__ = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
96
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowercase__ = get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = """dummy_data""" lowerCamelCase__ = """datasets""" lowerCamelCase__ = False def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ): _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Dict = dataset_name _lowerCamelCase : Union[str, Any] = cache_dir _lowerCamelCase : Dict = use_local_dummy_data _lowerCamelCase : Tuple = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : Any = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : str = str(lowercase ) # to be downloaded _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : int = None @property def A_ ( self ): if self._dummy_file is None: _lowerCamelCase : Tuple = self.download_dummy_data() return self._dummy_file @property def A_ ( self ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def A_ ( self ): return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def A_ ( self ): _lowerCamelCase : List[str] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : int = cached_path( lowercase , 
cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase ) return os.path.join(lowercase , self.dummy_file_name ) @property def A_ ( self ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def A_ ( self ): if self._bucket_url is None: _lowerCamelCase : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def A_ ( self ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def A_ ( self , lowercase , *lowercase ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Union[str, Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase , lowercase ): return self.create_dummy_data_dict(lowercase , lowercase ) elif isinstance(lowercase , (list, tuple) ): return self.create_dummy_data_list(lowercase , lowercase ) else: return self.create_dummy_data_single(lowercase , lowercase ) def A_ ( self , lowercase , *lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , lowercase ): return self.download_and_extract(lowercase ) def A_ ( self , lowercase , *lowercase , **lowercase ): return path def A_ ( self ): return {} def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[int] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase , lowercase ): for single_url in single_urls: download_callback(lowercase ) else: _lowerCamelCase : List[Any] = single_urls download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url 
path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase , lowercase ): _lowerCamelCase : List[Any] = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls] else: _lowerCamelCase : Optional[int] = single_urls _lowerCamelCase : List[Any] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) _lowerCamelCase : int = value # make sure that values are unique if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase ) ) for url in data_url ) _lowerCamelCase : int = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : List[str] = [data_url[0]] * len(lowercase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : str = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(lowercase ) return dummy_data_list def A_ ( self , lowercase , lowercase ): for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them 
with urllib.parse.quote_plus _lowerCamelCase : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(lowercase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def A_ ( self ): pass def A_ ( self ): pass def A_ ( self , lowercase ): def _iter_archive_members(lowercase ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : str = Path(self.dummy_file ).parent _lowerCamelCase : Union[str, Any] = path.relative_to(lowercase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : List[str] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase ) _lowerCamelCase : Optional[int] = Path(lowercase ) _lowerCamelCase : Dict = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(lowercase ).as_posix(), file_path.open('rb' ) def A_ ( self , lowercase ): if not isinstance(lowercase , lowercase ): _lowerCamelCase : List[str] = [paths] for path in paths: if os.path.isfile(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase ): if os.path.basename(lowercase ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(lowercase ): if filename.startswith(('.', '__') ): continue yield os.path.join(lowercase , lowercase )
96
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase__ = False @skip_mps class lowerCAmelCase__ ( lowercase, lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = StableDiffusionAttendAndExcitePipeline lowerCamelCase__ = False lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def A_ ( cls ): super().setUpClass() torch.use_deterministic_algorithms(lowercase ) @classmethod def A_ ( cls ): super().tearDownClass() torch.use_deterministic_algorithms(lowercase ) def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase , ) _lowerCamelCase : Tuple = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) _lowerCamelCase : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , 
up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _lowerCamelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) _lowerCamelCase : Dict = CLIPTextModel(lowercase ) _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _lowerCamelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def A_ ( self , lowercase , lowercase=0 ): if str(lowercase ).startswith('mps' ): _lowerCamelCase : Any = torch.manual_seed(lowercase ) else: _lowerCamelCase : str = torch.Generator(device=lowercase ).manual_seed(lowercase ) _lowerCamelCase : Optional[int] = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def A_ ( self ): _lowerCamelCase : Union[str, Any] = 'cpu' _lowerCamelCase : Optional[Any] = self.get_dummy_components() _lowerCamelCase : List[Any] = self.pipeline_class(**lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : str = self.get_dummy_inputs(lowercase ) _lowerCamelCase : Tuple = pipe(**lowercase ).images _lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) _lowerCamelCase : List[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) _lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase , 1E-3 ) def A_ ( self ): 
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def A_ ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self ): self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def A_ ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def A_ ( self ): super().test_save_load_local(expected_max_difference=5E-4 ) def A_ ( self ): super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def A_ ( cls ): super().setUpClass() torch.use_deterministic_algorithms(lowercase ) @classmethod def A_ ( cls ): super().tearDownClass() torch.use_deterministic_algorithms(lowercase ) def A_ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self ): _lowerCamelCase : Any = torch.manual_seed(51 ) _lowerCamelCase : int = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=lowercase , torch_dtype=torch.floataa ) pipe.to('cuda' ) _lowerCamelCase : Tuple = 'a painting of an elephant with glasses' _lowerCamelCase : Optional[Any] = [5, 7] _lowerCamelCase : Tuple = pipe( prompt=lowercase , token_indices=lowercase , guidance_scale=7.5 , generator=lowercase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] _lowerCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
96
"""simple docstring""" def _snake_case ( lowercase__ ): stooge(lowercase__ , 0 , len(lowercase__ ) - 1 ) return arr def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _lowerCamelCase, _lowerCamelCase : Optional[Any] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _lowerCamelCase : Union[str, Any] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowercase__ , i + t , (lowercase__) ) # Recursively sort first 2/3 elements stooge(lowercase__ , lowercase__ , (h - t) ) if __name__ == "__main__": lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
96
1
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowercase ), """Tatoeba directory does not exist.""" ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A_ ( self ): _lowerCamelCase : Dict = tempfile.mkdtemp() return TatoebaConverter(save_dir=lowercase ) @slow def A_ ( self ): self.resolver.convert_models(['heb-eng'] ) @slow def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Dict = self.resolver.write_model_card('opus-mt-he-en' , dry_run=lowercase ) assert mmeta["long_pair"] == "heb-eng"
96
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = """AutoTokenizer""" def __init__( self , lowercase , lowercase , lowercase ): super().__init__(lowercase , lowercase ) # add QFormer tokenizer _lowerCamelCase : int = qformer_tokenizer def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify at least images or text.' 
) _lowerCamelCase : int = BatchFeature() if text is not None: _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) encoding.update(lowercase ) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' ) _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' ) if images is not None: _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase ) encoding.update(lowercase ) return encoding def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A_ ( self , lowercase , **lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path 
({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowercase ) return super().save_pretrained(lowercase , **lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' ) _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase ) args.append(lowercase ) return cls(*lowercase )
96
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = { """configuration_time_series_transformer""": [ """TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimeSeriesTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimeSeriesTransformerForPrediction""", """TimeSeriesTransformerModel""", """TimeSeriesTransformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowercase__ = logging.getLogger() def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = {} _lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' ) if os.path.exists(lowercase__ ): with open(lowercase__ , 'r' ) as f: _lowerCamelCase : List[Any] = json.load(lowercase__ ) else: raise ValueError(f'''can\'t find {path}''' ) return results lowercase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): import xla_spawn _lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : List[Any] = F''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(lowercase , 'argv' , lowercase ): _lowerCamelCase : Dict = time() xla_spawn.main() _lowerCamelCase : Any = time() _lowerCamelCase : Optional[int] = get_results(lowercase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def A_ ( self ): import xla_spawn _lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(lowercase , 'argv' , lowercase ): xla_spawn.main()
96
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = 32 , lowercase=PILImageResampling.BILINEAR , lowercase = True , **lowercase , ): _lowerCamelCase : List[Any] = do_resize _lowerCamelCase : Dict = do_rescale _lowerCamelCase : Tuple = size_divisor _lowerCamelCase : List[str] = resample super().__init__(**lowercase ) def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase ): _lowerCamelCase, _lowerCamelCase : Dict = get_image_size(lowercase ) # Rounds the height and width down to the closest multiple of size_divisor _lowerCamelCase : List[str] = height // size_divisor * size_divisor _lowerCamelCase : Tuple = width // size_divisor * size_divisor _lowerCamelCase : int = resize(lowercase , (new_h, new_w) , resample=lowercase , data_format=lowercase , **lowercase ) return image def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(image=lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor 
_lowerCamelCase : Optional[Any] = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('size_divisor is required for resizing' ) _lowerCamelCase : Tuple = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError('Invalid image(s)' ) # All transformations expect numpy arrays. _lowerCamelCase : Any = [to_numpy_array(lowercase ) for img in images] if do_resize: _lowerCamelCase : Optional[int] = [self.resize(lowercase , size_divisor=lowercase , resample=lowercase ) for image in images] if do_rescale: _lowerCamelCase : List[str] = [self.rescale(lowercase , scale=1 / 255 ) for image in images] _lowerCamelCase : Union[str, Any] = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : str = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
96
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _snake_case ( lowercase__ , lowercase__ ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) ) def _snake_case ( lowercase__ , lowercase__ ): if dataset.ndim != value_array.ndim: _lowerCamelCase : Tuple = ( 'Wrong input data\'s dimensions... ' f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(lowercase__ ) try: if dataset.shape[1] != value_array.shape[1]: _lowerCamelCase : Optional[int] = ( 'Wrong input data\'s shape... ' f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(lowercase__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('Wrong shape' ) if dataset.dtype != value_array.dtype: _lowerCamelCase : int = ( 'Input data have different datatype... ' f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(lowercase__ ) _lowerCamelCase : Optional[int] = [] for value in value_array: _lowerCamelCase : Tuple = euclidean(lowercase__ , dataset[0] ) _lowerCamelCase : Union[str, Any] = dataset[0].tolist() for dataset_value in dataset[1:]: _lowerCamelCase : Optional[Any] = euclidean(lowercase__ , lowercase__ ) if dist > temp_dist: _lowerCamelCase : List[Any] = temp_dist _lowerCamelCase : List[str] = dataset_value.tolist() answer.append([vector, dist] ) return answer def _snake_case ( lowercase__ , lowercase__ ): return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ )) if __name__ == "__main__": import doctest doctest.testmod()
96
1
"""CLI: convert an original Stable Diffusion checkpoint into diffusers format."""
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # fix: was `torch.floataa`, which does not exist
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
96
"""Simple TCP client that receives a file from a server on the local host."""
import socket


def main():
    """Connect to localhost:12312, send a greeting, and stream the response
    into ``Received_file`` until the server closes the connection."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            # An empty read means the peer closed the connection.
            if not data:
                break
            # fix: was `out_file.write(lowercase__)` — an undefined name
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    # fix: the guard previously called an undefined `main()`
    main()
96
1
"""Fine-tune or train-from-scratch a language model (legacy Trainer script)."""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by *args*.

    ``evaluate=True`` selects the eval files; otherwise either a glob of
    training files (concatenated) or the single training file is used.
    """

    def _dataset(file_path, ref_path=None):
        # One dataset for one file; honors line-by-line vs. contiguous blocks.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.'
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name'
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).'
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
96
"""The SuperGLUE benchmark metric."""
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Accuracy plus F1 (averaging mode selectable, e.g. 'macro' for CB)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question exact match and macro-F1, plus overall F1.

    *ids_preds* carry paragraph/question indices so answers can be grouped by
    question before computing per-question statistics.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='macro')
        f1s.append(f1)
        # Exact match only when every answer for the question is correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """datasets.Metric wrapper dispatching to the per-subset scorers above."""

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None,
        )

    def _get_feature_types(self):
        # record/multirc carry structured idx dictionaries; everything else is flat labels.
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "prediction_text": datasets.Value('string'),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value('int64'),
                        "query": datasets.Value('int64'),
                    },
                    "answers": datasets.Sequence(datasets.Value('string')),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value('int64'),
                        "paragraph": datasets.Value('int64'),
                        "question": datasets.Value('int64'),
                    },
                    "prediction": datasets.Value('int64'),
                },
                "references": datasets.Value('int64'),
            }
        else:
            return {
                "predictions": datasets.Value('int64'),
                "references": datasets.Value('int64'),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
96
1
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=2 , lowercase=8 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=16 , lowercase=5 , lowercase=2 , lowercase=36 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ): _lowerCamelCase : Optional[int] = parent _lowerCamelCase : Tuple = batch_size _lowerCamelCase : int = seq_length _lowerCamelCase : Union[str, Any] = is_training _lowerCamelCase : Any = use_input_mask _lowerCamelCase : Union[str, Any] = use_token_type_ids _lowerCamelCase : str = use_labels _lowerCamelCase : int = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : str = num_labels _lowerCamelCase : Tuple = num_choices _lowerCamelCase : Optional[Any] = scope def A_ ( self ): 
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Tuple = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : List[str] = None _lowerCamelCase : List[str] = None _lowerCamelCase : Optional[int] = None if self.use_labels: _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.get_config() _lowerCamelCase : List[str] = 300 return config def A_ ( self ): ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : Union[str, Any] = self.prepare_config_and_inputs() _lowerCamelCase : List[Any] = True _lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowerCamelCase : List[str] = 
ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = MraModel(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) _lowerCamelCase : Optional[Any] = model(lowercase , token_type_ids=lowercase ) _lowerCamelCase : str = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ): _lowerCamelCase : Dict = True _lowerCamelCase : Optional[Any] = MraModel(lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , ) _lowerCamelCase : Tuple = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , encoder_hidden_states=lowercase , ) _lowerCamelCase : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Tuple = MraForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , 
lowercase , lowercase ): _lowerCamelCase : Tuple = MraForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Union[str, Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : List[Any] = self.num_labels _lowerCamelCase : Optional[int] = MraForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : int = self.num_labels _lowerCamelCase : Optional[Any] = MraForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Tuple = self.num_choices _lowerCamelCase : Optional[int] = MraForMultipleChoice(config=lowercase ) model.to(lowercase ) model.eval() _lowerCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : Tuple = model( lowercase , 
attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : Union[str, Any] = config_and_inputs _lowerCamelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = () def A_ ( self ): _lowerCamelCase : Optional[Any] = MraModelTester(self ) _lowerCamelCase : int = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCamelCase : Dict = type self.model_tester.create_and_check_model(*lowercase ) def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase ) def A_ ( self ): _lowerCamelCase : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase ) @slow def A_ ( self ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Union[str, Any] = MraModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @unittest.skip(reason='MRA does not output attentions' ) def A_ ( self ): return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) _lowerCamelCase : List[Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCamelCase : Dict = model(lowercase )[0] _lowerCamelCase : Any = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : Optional[int] = torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) ) @slow def A_ ( self ): _lowerCamelCase : Union[str, Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) _lowerCamelCase : Any = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _lowerCamelCase : Tuple = model(lowercase )[0] _lowerCamelCase : Optional[Any] = 50265 _lowerCamelCase : str = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : Tuple = torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) ) @slow def A_ ( self ): 
_lowerCamelCase : Dict = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) _lowerCamelCase : Any = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): _lowerCamelCase : str = model(lowercase )[0] _lowerCamelCase : Tuple = 50265 _lowerCamelCase : int = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : int = torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
96
"""Fast (CPU) and slow (GPU) tests for the DDIM unconditional image pipeline."""
import unittest

import numpy as np
import torch

# NOTE(review): the obfuscated import named "UNetaDModel", which does not
# exist in diffusers; restored to UNet2DModel, which the tests construct.
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): class name, mixin base, and the trailing boolean flag were
    # collapsed by the obfuscation; reconstructed from the standard diffusers
    # pipeline-test layout -- confirm against the original file.
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE(review): flag name reconstructed -- confirm

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        """DDIM on the CIFAR-10 DDPM checkpoint reproduces recorded pixels."""
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        """DDIM on the EMA bedroom checkpoint reproduces recorded pixels."""
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
96
1
"""Project Euler problem 92: square-digit chains.

Repeatedly replacing a number by the sum of the squares of its digits
always reaches 1 or 89.  ``solution(n)`` counts how many starting numbers
below ``n`` arrive at 89.
"""

# Sum of squared digits for every value below 100000, precomputed once so
# ``next_number`` can consume five digits per loop iteration.
DIGITS_SQUARED = [sum(int(digit) ** 2 for digit in str(i)) for i in range(100_000)]

# CHAINS[n - 1] caches where n's chain terminates: True -> 1, False -> 89,
# None -> not computed yet.  Seeding 1 (ends at 1) and 58 (ends at 89)
# keeps the recursion in ``chain`` short for every other number.
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def next_number(number: int) -> int:
    """Return the sum of the squares of the decimal digits of ``number``.

    >>> next_number(32)
    13
    >>> next_number(10_000_000)
    1
    """
    sum_of_digits_squared = 0
    while number:
        # Consume five digits at a time via the precomputed table.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends at 1, False if it ends at 89.

    Results are memoised in CHAINS.  Appending a zero digit does not change
    the digit-square sum, so n, 10n, 100n, ... share one result; the loop
    below caches all of them in a single pass.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore[return-value]

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count the starting numbers below ``number`` whose chain reaches 89.

    >>> solution(100)
    80
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # False entries mark chains that terminate at 89.
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
96
"""Vegetation/agronomic index calculator over per-band reflectance matrices."""

# Imports
import numpy as np


class IndexCalculation:
    """Compute spectral vegetation indices (NDVI, EVI, GNDVI, ...) from the
    red, green, blue, red-edge and NIR bands of an image.

    Bands may be scalars or numpy arrays of matching shape; all index
    methods apply element-wise arithmetic.

    NOTE(review): the obfuscated source assigned the bands to throwaway
    locals instead of ``self.*`` and named every method ``A_`` while the
    dispatch table in ``calculation`` referenced the real names -- the class
    could not work at all.  Attribute assignments and method names are
    restored here from that dispatch table.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store the supplied bands; bands left as None are not overwritten."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch an index computation by name.

        Updates any supplied bands first, then returns the index value, or
        False (after printing a notice) for an unknown index name.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            # Deliberate best-effort behaviour: report and return False
            # rather than raising for an unknown index name.
            print("Index not in the list!")
            return False

    def arvaa(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (2 * self.green + self.red + self.blue)

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (self.nir + (self.green + self.blue))

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (self.nir + (self.green + self.red))

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted Transformed Soil-Adjusted Vegetation Index."""
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index (green)."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index (red-edge)."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * ((self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1))

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted Vegetation Index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue."""
        return np.arctan(((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index (requires intercept ``b`` and slope ``a``)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple NIR/red ratio (kept as in the source, despite the name)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
96
1
"""Convert an original FLAVA checkpoint (plus DALL-E codebook) to HF format."""
import argparse
import os

import torch


def count_parameters(state_dict):
    """Sum every parameter value in ``state_dict`` (as a float tensor).

    Used as a cheap conversion checksum.  encoder.embeddings entries are
    excluded because they are double-copied in the original FLAVA weights.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original FLAVA keys to the HF layout and merge in the codebook.

    NOTE(review): the obfuscated source lost the assignment targets; the
    repeated ``key = key.replace(...)`` chain and the final
    ``upgrade[...] = ...`` writes are restored from the replacement strings,
    which survived intact.
    """
    upgrade = {}

    for key, value in state_dict.items():
        # These embeddings are duplicated in the original checkpoint.
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Load, upgrade, sanity-check and save a FLAVA checkpoint in HF format.

    ``checkpoint_path`` may be a local file or a URL (fetched via torch.hub).
    """
    # Imported lazily so the pure state-dict helpers above remain usable
    # without transformers installed.
    from transformers import FlavaConfig, FlavaForPreTraining
    from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint

    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    # NOTE(review): positional args reconstructed (codebook path, no dump
    # path, no checkpoint save) -- confirm against convert_dalle_checkpoint.
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # Parameter-sum checksum must survive the rename round-trip.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
96
"""Image encoder and mapper modules used by the PaintByExample pipeline."""
import torch
from torch import nn

from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision tower + transformer mapper projecting an example image
    into the conditioning space of the diffusion UNet.

    NOTE(review): in the obfuscated source both classes were named
    ``lowerCAmelCase__`` while this class referenced ``PaintByExampleMapper``
    by name, so the module could not run; real names and the lost
    ``self.*`` attribute assignments are restored here.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # Learned unconditional embedding, returned for guidance scaling.
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a length-1 sequence axis before the mapper blocks.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    """Stack of transformer blocks applied to the pooled CLIP embedding."""

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        # NOTE(review): positional args reconstructed as
        # (dim, num_attention_heads, attention_head_dim) per
        # BasicTransformerBlock's signature, and the collapsed
        # attention_bias argument as True; the source's activation_fn
        # string 'gelu' is kept verbatim (upstream diffusers uses
        # 'geglu' here) -- confirm against the original file.
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size,
                    num_heads,
                    hid_size // num_heads,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
96
1