code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A = logging.get_logger(__name__) A = Dict[str, Any] A = List[Prediction] @add_end_docstrings(__magic_name__ ) class a__ ( __magic_name__ ): def __init__( self : Optional[int] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Any): """simple docstring""" super().__init__(*UpperCamelCase_ , **UpperCamelCase_) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch.") requires_backends(self , "vision") self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())) def a_ ( self : Dict , **UpperCamelCase_ : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = {} if "threshold" in kwargs: __UpperCAmelCase : Dict = kwargs["threshold"] return {}, {}, postprocess_kwargs def __call__( self : str , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Tuple): """simple docstring""" return super().__call__(*UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : int , UpperCamelCase_ : Dict): """simple docstring""" __UpperCAmelCase : Optional[int] = load_image(UpperCamelCase_) __UpperCAmelCase : Optional[int] = torch.IntTensor([[image.height, image.width]]) __UpperCAmelCase : str = self.image_processor(images=[image] , return_tensors="pt") if self.tokenizer is not None: __UpperCAmelCase : Any = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt") __UpperCAmelCase : List[Any] = target_size return inputs def a_ ( self : Tuple , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : 
Dict = model_inputs.pop("target_size") __UpperCAmelCase : Tuple = self.model(**UpperCamelCase_) __UpperCAmelCase : int = outputs.__class__({"target_size": target_size, **outputs}) if self.tokenizer is not None: __UpperCAmelCase : str = model_inputs["bbox"] return model_outputs def a_ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple=0.9): """simple docstring""" __UpperCAmelCase : Dict = model_outputs["target_size"] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __UpperCAmelCase , __UpperCAmelCase : str = target_size[0].tolist() def unnormalize(UpperCamelCase_ : List[str]): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ])) __UpperCAmelCase , __UpperCAmelCase : int = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1) __UpperCAmelCase : List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __UpperCAmelCase : Optional[Any] = [unnormalize(UpperCamelCase_) for bbox in model_outputs["bbox"].squeeze(0)] __UpperCAmelCase : Union[str, Any] = ["score", "label", "box"] __UpperCAmelCase : Dict = [dict(zip(UpperCamelCase_ , UpperCamelCase_)) for vals in zip(scores.tolist() , UpperCamelCase_ , UpperCamelCase_) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __UpperCAmelCase : List[str] = self.image_processor.post_process_object_detection(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = raw_annotations[0] __UpperCAmelCase : Optional[int] = raw_annotation["scores"] __UpperCAmelCase : Dict = raw_annotation["labels"] __UpperCAmelCase : Tuple = raw_annotation["boxes"] __UpperCAmelCase : List[Any] = scores.tolist() __UpperCAmelCase : Any = [self.model.config.idalabel[label.item()] for label in labels] __UpperCAmelCase : Tuple = 
[self._get_bounding_box(UpperCamelCase_) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __UpperCAmelCase : Union[str, Any] = ["score", "label", "box"] __UpperCAmelCase : Optional[int] = [ dict(zip(UpperCamelCase_ , UpperCamelCase_)) for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"]) ] return annotation def a_ ( self : int , UpperCamelCase_ : "torch.Tensor"): """simple docstring""" if self.framework != "pt": raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.") __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = box.int().tolist() __UpperCAmelCase : Dict = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
77
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
1
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() A = logging.get_logger(__name__) A = [ ("""bert.bert""", """visual_bert"""), ("""bert.cls""", """cls"""), ("""bert.classifier""", """cls"""), ("""token_type_embeddings_visual""", """visual_token_type_embeddings"""), ("""position_embeddings_visual""", """visual_position_embeddings"""), ("""projection""", """visual_projection"""), ] A = [ """nlvr2_coco_pre_trained.th""", """nlvr2_fine_tuned.th""", """nlvr2_pre_trained.th""", """vcr_coco_pre_train.th""", """vcr_fine_tune.th""", """vcr_pre_train.th""", """vqa_coco_pre_trained.th""", """vqa_fine_tuned.th""", """vqa_pre_trained.th""", ] def _UpperCamelCase ( UpperCamelCase ) -> List[Any]: """simple docstring""" __UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="cpu" ) return sd def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=rename_keys_prefix ) -> List[str]: """simple docstring""" __UpperCAmelCase : Optional[Any] = OrderedDict() __UpperCAmelCase : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __UpperCAmelCase : Optional[int] = key for name_pair in rename_keys_prefix: __UpperCAmelCase : List[Any] = new_key.replace(name_pair[0] , name_pair[1] ) __UpperCAmelCase : Dict = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __UpperCAmelCase : Optional[Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" 
assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: __UpperCAmelCase : str = "pretraining" if "vcr" in checkpoint_path: __UpperCAmelCase : Optional[Any] = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: __UpperCAmelCase : Union[str, Any] = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: __UpperCAmelCase : Union[str, Any] = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: __UpperCAmelCase : List[Any] = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." ) else: if "vcr" in checkpoint_path: __UpperCAmelCase : Union[str, Any] = {"visual_embedding_dim": 512} __UpperCAmelCase : Union[str, Any] = "multichoice" elif "vqa_advanced" in checkpoint_path: __UpperCAmelCase : Union[str, Any] = {"visual_embedding_dim": 2048} __UpperCAmelCase : Any = "vqa_advanced" elif "vqa" in checkpoint_path: __UpperCAmelCase : str = {"visual_embedding_dim": 2048, "num_labels": 3129} __UpperCAmelCase : List[str] = "vqa" elif "nlvr" in checkpoint_path: __UpperCAmelCase : Any = { "visual_embedding_dim": 1024, "num_labels": 2, } __UpperCAmelCase : List[Any] = "nlvr" __UpperCAmelCase : Optional[int] = VisualBertConfig(**UpperCamelCase ) # Load State Dict __UpperCAmelCase : Dict = load_state_dict(UpperCamelCase ) __UpperCAmelCase : List[Any] = get_new_dict(UpperCamelCase , UpperCamelCase ) if model_type == "pretraining": __UpperCAmelCase : Optional[int] = VisualBertForPreTraining(UpperCamelCase ) elif model_type == "vqa": __UpperCAmelCase : str = VisualBertForQuestionAnswering(UpperCamelCase ) elif model_type == "nlvr": __UpperCAmelCase : List[Any] = VisualBertForVisualReasoning(UpperCamelCase ) elif model_type == "multichoice": __UpperCAmelCase : Tuple = VisualBertForMultipleChoice(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) # Save Checkpoints 
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") A = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
77
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class a__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase_ = StableDiffusionPanoramaPipeline lowercase_ = TEXT_TO_IMAGE_PARAMS lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS def a_ ( self : int): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) __UpperCAmelCase : List[str] = DDIMScheduler() torch.manual_seed(0) __UpperCAmelCase : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0) __UpperCAmelCase : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __UpperCAmelCase : Optional[int] = CLIPTextModel(UpperCamelCase_) __UpperCAmelCase : List[str] = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") __UpperCAmelCase : List[str] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=0): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.manual_seed(UpperCamelCase_) __UpperCAmelCase : Dict = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. "height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Tuple = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**UpperCamelCase_) __UpperCAmelCase : Optional[int] = sd_pipe.to(UpperCamelCase_) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_) __UpperCAmelCase : Optional[int] = sd_pipe(**UpperCamelCase_).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Any = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : List[Any]): """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Optional[int]): """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Optional[Any] = 
self.get_dummy_components() __UpperCAmelCase : Any = StableDiffusionPanoramaPipeline(**UpperCamelCase_) __UpperCAmelCase : Dict = sd_pipe.to(UpperCamelCase_) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_) __UpperCAmelCase : List[str] = "french fries" __UpperCAmelCase : Any = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = output.images __UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : str = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : Dict = StableDiffusionPanoramaPipeline(**UpperCamelCase_) __UpperCAmelCase : List[Any] = sd_pipe.to(UpperCamelCase_) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_) __UpperCAmelCase : int = sd_pipe(**UpperCamelCase_ , view_batch_size=2) __UpperCAmelCase : Any = output.images __UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Optional[Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear") __UpperCAmelCase : Union[str, Any] = 
StableDiffusionPanoramaPipeline(**UpperCamelCase_) __UpperCAmelCase : List[str] = sd_pipe.to(UpperCamelCase_) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) __UpperCAmelCase : str = sd_pipe(**UpperCamelCase_).images __UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : int = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase : int = self.get_dummy_components() __UpperCAmelCase : List[Any] = PNDMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=UpperCamelCase_) __UpperCAmelCase : Tuple = StableDiffusionPanoramaPipeline(**UpperCamelCase_) __UpperCAmelCase : str = sd_pipe.to(UpperCamelCase_) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = sd_pipe(**UpperCamelCase_).images __UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Any = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[Any]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]=0): """simple docstring""" __UpperCAmelCase : List[str] = torch.manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": 
"numpy", } return inputs def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-2-base" __UpperCAmelCase : Dict = DDIMScheduler.from_pretrained(UpperCamelCase_ , subfolder="scheduler") __UpperCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_) pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) pipe.enable_attention_slicing() __UpperCAmelCase : Optional[Any] = self.get_inputs() __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_).images __UpperCAmelCase : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __UpperCAmelCase : Tuple = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ]) assert np.abs(expected_slice - image_slice).max() < 1e-2 def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base" , safety_checker=UpperCamelCase_) __UpperCAmelCase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) pipe.enable_attention_slicing() __UpperCAmelCase : Dict = self.get_inputs() __UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_).images __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __UpperCAmelCase : Optional[int] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : List[Any] = 0 def callback_fn(UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor) -> None: __UpperCAmelCase : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __UpperCAmelCase 
: List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __UpperCAmelCase : Dict = latents[0, -3:, -3:, -1] __UpperCAmelCase : Union[str, Any] = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: __UpperCAmelCase : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __UpperCAmelCase : Any = latents[0, -3:, -3:, -1] __UpperCAmelCase : Optional[int] = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 __UpperCAmelCase : Dict = False __UpperCAmelCase : Any = "stabilityai/stable-diffusion-2-base" __UpperCAmelCase : str = DDIMScheduler.from_pretrained(UpperCamelCase_ , subfolder="scheduler") __UpperCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) pipe.enable_attention_slicing() __UpperCAmelCase : str = self.get_inputs() pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def a_ ( self : str): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase : Optional[int] = "stabilityai/stable-diffusion-2-base" __UpperCAmelCase : Any = DDIMScheduler.from_pretrained(UpperCamelCase_ , subfolder="scheduler") __UpperCAmelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_) __UpperCAmelCase : Tuple = pipe.to(UpperCamelCase_) 
pipe.set_progress_bar_config(disable=UpperCamelCase_) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() __UpperCAmelCase : int = self.get_inputs() __UpperCAmelCase : Optional[Any] = pipe(**UpperCamelCase_) __UpperCAmelCase : str = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
77
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : 
Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
1
"""Unit tests for ``diffusers``' ``get_activation`` factory.

Fixes two defects in the original block:
* all four test methods were named ``a_``, so the first three were shadowed
  by the last and never collected/run by unittest (discovery requires unique
  ``test_*`` names);
* ``torch.floataa`` is not a real dtype attribute — the intended dtype is
  ``torch.float32``.
"""
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class a__(unittest.TestCase):
    """Checks that get_activation returns the expected module type and that
    the module behaves sanely at a few probe points (saturation at large
    negative input, identity-ish behavior at large positive input, zero at 0).
    """

    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        # Large negative input saturates to 0 in float32.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        # "silu" is an alias for the same module as "swish".
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        # Mish saturates more slowly than SiLU, hence the -200 probe.
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
77
"""Audio feature extractor (CLAP-style): converts raw waveforms into
(possibly "fused") log-mel spectrogram features.

NOTE(review): identifiers in this file are machine-mangled (``a__``,
``UpperCamelCase_``, ``__UpperCAmelCase``); all parameters of a signature
share one name and assignment targets are placeholders.  The comments below
describe intent inferred from the real names still visible inside the bodies
(``top_db``, ``truncation``, ``mel_filters`` ...) — confirm against the
upstream file before relying on them.
"""
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

A = logging.get_logger(__name__)


class a__ ( __magic_name__ ):
    # Keys emitted by __call__ in the returned BatchFeature.
    lowercase_ = ["input_features", "is_longer"]

    def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ):
        """Configure the extractor and precompute two mel filter banks:
        an "htk"-scaled bank (fusion path) and a "slaney"-scaled bank
        (non-fusion path)."""
        super().__init__(
            feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
        __UpperCAmelCase : Union[str, Any] = top_db
        __UpperCAmelCase : Optional[Any] = truncation
        __UpperCAmelCase : str = padding
        __UpperCAmelCase : int = fft_window_size
        # Number of frequency bins of a real FFT: fft_window_size // 2 + 1.
        __UpperCAmelCase : str = (fft_window_size >> 1) + 1
        __UpperCAmelCase : List[Any] = hop_length
        __UpperCAmelCase : Optional[Any] = max_length_s
        # Maximum number of raw samples before truncation applies.
        __UpperCAmelCase : Tuple = max_length_s * sampling_rate
        __UpperCAmelCase : str = sampling_rate
        __UpperCAmelCase : int = frequency_min
        __UpperCAmelCase : Optional[Any] = frequency_max
        # "htk" mel scale -> presumably stored as self.mel_filters (fusion path).
        __UpperCAmelCase : Any = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , )
        # "slaney" mel scale -> presumably self.mel_filters_slaney (non-fusion path).
        __UpperCAmelCase : Any = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , )

    def a_ ( self : Dict):
        """Serialize to a plain dict, dropping the (large, reconstructible)
        mel filter banks."""
        __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__)
        __UpperCAmelCase : str = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None):
        """Compute a dB-scaled log-mel spectrogram; the final ``.T`` puts
        frames on axis 0 and mel bins on axis 1."""
        __UpperCAmelCase : List[Any] = spectrogram(
            UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , )
        return log_mel_spectrogram.T

    def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int):
        """Random "fusion": sample one chunk from the front/middle/back thirds
        of the mel and stack them with a bilinearly shrunk copy of the whole
        mel (4 channels total).  Non-deterministic: uses np.random."""
        # Valid chunk start positions, split into three contiguous ranges.
        __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            __UpperCAmelCase : str = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            __UpperCAmelCase : Dict = [0]
        # randomly choose index for each part
        __UpperCAmelCase : Dict = np.random.choice(ranges[0])
        __UpperCAmelCase : List[str] = np.random.choice(ranges[1])
        __UpperCAmelCase : List[Any] = np.random.choice(ranges[2])
        __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :]
        __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
        __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :]
        # Shrink the full mel to (chunk_frames, 64) via torch bilinear interp;
        # [None, None, :] adds the (batch, channel) dims interpolate expects.
        __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :])
        __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
            UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_)
        __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy()
        # Channel order: shrunk global view first, then the three random chunks.
        __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
        return mel_fusion

    def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]):
        """Truncate/pad one waveform to max_length and extract mel features.

        Returns ``(input_mel, longer)`` where ``longer`` flags audio that
        exceeded max_length (consumed by the model's fusion branch).
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                __UpperCAmelCase : List[str] = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length
                __UpperCAmelCase : int = np.random.randint(0 , overflow + 1)
                __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length]
                # [None, :] adds a leading channel dim of 1.
                __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
                __UpperCAmelCase : Dict = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                __UpperCAmelCase : Tuple = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0)
                    __UpperCAmelCase : Any = False
                else:
                    __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
                    __UpperCAmelCase : Union[str, Any] = True
            else:
                raise NotImplementedError(F"data_truncating {truncation} not implemented")
        else:
            __UpperCAmelCase : Optional[Any] = False
            # only use repeat as a new possible value for padding. you repeat the
            # audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_))
                    __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_))
                    __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_))
                # Zero-pad the (possibly repeated) waveform up to max_length.
                __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0)
            if truncation == "fusion":
                __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
                # Short audio: duplicate the same mel across all 4 fusion channels.
                __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
            else:
                __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ):
        """Featurize one waveform or a batch into a BatchFeature with
        "input_features" (mel features) and "is_longer" (per-item bool).

        Raises ValueError on sampling-rate mismatch or multi-channel input.
        """
        __UpperCAmelCase : int = truncation if truncation is not None else self.truncation
        __UpperCAmelCase : Optional[Any] = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        # 2-D ndarray = batch of mono waveforms; >2-D = multi-channel (rejected).
        __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}")
        __UpperCAmelCase : str = is_batched_numpy or (
            isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray):
            __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa)
        elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)]
        # convert to mel spectrogram, truncate and pad if needed.
        __UpperCAmelCase : Optional[int] = [
            self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_)
            for waveform in raw_speech
        ]
        __UpperCAmelCase : Tuple = []
        __UpperCAmelCase : List[Any] = []
        for mel, longer in padded_inputs:
            input_mel.append(UpperCamelCase_)
            is_longer.append(UpperCamelCase_)
        if truncation == "fusion" and sum(UpperCamelCase_) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_))
            __UpperCAmelCase : Optional[int] = True
        if isinstance(input_mel[0] , UpperCamelCase_):
            __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel]
        # is_longer is a list of bool
        __UpperCAmelCase : List[str] = [[longer] for longer in is_longer]
        __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer}
        __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_)
        if return_tensors is not None:
            __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_)
        return input_features
77
1
"""Introsort (introspective sort): quicksort with median-of-3 pivot, falling
back to heapsort past a recursion-depth budget and to insertion sort on
small slices.

Fixes for the original block: every function was defined under the single
name ``_UpperCamelCase`` (each definition shadowing the previous), while the
call sites referenced the real helper names (``heapify``, ``heap_sort``,
``partition``, ``intro_sort``, ...) which were therefore undefined at
runtime; ``math.loga`` is not a real attribute (``math.log2``); and the
``__main__`` block printed ``sort(unsorted)`` with ``unsorted`` undefined.
The algorithm itself is unchanged.
"""
import math


def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place by insertion; returns the array.

    end=0 (the default) means "to the end of the array".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift array[index] down so the subtree rooted at index is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """In-place heapsort; returns the array (fallback when recursion is deep)."""
    n = len(array)
    # Build the max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the max to the end and restore the heap property.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three sampled values (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of array[low:high] around pivot.

    Returns the split index i: elements left of i are <= pivot-side values.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort a list in place with introsort and return it.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    >>> sort([5])
    [5]
    """
    if len(array) == 0:
        return array
    # Depth budget 2*ceil(log2(n)) before switching to heapsort (Musser).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort is degenerating; finish with heapsort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    # Small slice: insertion sort is fastest.
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
77
"""Byte-level tokenizer (ByT5-style): the vocabulary is 3 special tokens +
256 byte values + ``extra_ids`` sentinel tokens at the top of the id space.

NOTE(review): identifiers are machine-mangled (``a__``, ``UpperCamelCase_``,
``__UpperCAmelCase``); signature parameters share one name and assignment
targets are placeholders.  Comments describe intent inferred from the real
names used inside the bodies (``extra_ids``, ``pad_token``, ``token_ids`` ...).
"""
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

A = logging.get_logger(__name__)


class a__ ( __magic_name__ ):
    # Model input tensor names.
    lowercase_ = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ):
        """Build the tokenizer; synthesize <extra_id_i> sentinels when the
        caller did not supply additional_special_tokens."""
        if extra_ids > 0 and additional_special_tokens is None:
            __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        # Wrap bare strings as AddedToken so strip behaviour is explicit.
        __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token
        __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token
        __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token
        super().__init__(
            eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
        __UpperCAmelCase : List[str] = extra_ids
        __UpperCAmelCase : int = 2**8  # utf is 8 bits
        # define special tokens dict  (token -> id; pad=0, eos=1, unk=2)
        __UpperCAmelCase : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __UpperCAmelCase : Any = len(self.special_tokens_encoder)
        __UpperCAmelCase : List[Any] = len(UpperCamelCase_)
        for i, token in enumerate(UpperCamelCase_):
            # Sentinel tokens occupy the very top ids of the vocabulary.
            __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n
        # Reverse map id -> token for decoding.
        __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def a_ ( self : List[Any]):
        """Total vocabulary size: 256 byte ids + special tokens + sentinels."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False):
        """Return a 0/1 mask flagging special-token positions (the appended
        eos after each sequence)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(UpperCamelCase_)) + [1]
        return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1]

    def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]):
        """Append eos_token_id unless the sequence already ends with it
        (in which case warn and return unchanged)."""
        if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
        """Token type ids: all zeros — this model does not use segment ids."""
        __UpperCAmelCase : Dict = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
        """Concatenate one or two sequences, each terminated with eos."""
        __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_)
            return token_ids_a + token_ids_a

    def a_ ( self : List[str] , UpperCamelCase_ : str):
        """Tokenize: one token per UTF-8 byte, each rendered with chr()."""
        __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")]
        return tokens

    def a_ ( self : Tuple , UpperCamelCase_ : List[Any]):
        """token -> id: specials first, then added tokens, then raw bytes
        (ord + offset); multi-char non-special strings map to unk."""
        if token in self.special_tokens_encoder:
            __UpperCAmelCase : Any = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __UpperCAmelCase : int = self.added_tokens_encoder[token]
        elif len(UpperCamelCase_) != 1:
            __UpperCAmelCase : Optional[Any] = self.unk_token_id
        else:
            # Byte tokens are offset past the special-token ids.
            __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens
        return token_id

    def a_ ( self : Any , UpperCamelCase_ : List[str]):
        """id -> token: inverse of _convert_token_to_id."""
        if index in self.special_tokens_decoder:
            __UpperCAmelCase : Any = self.special_tokens_decoder[index]
        else:
            __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens)
        return token

    def a_ ( self : Dict , UpperCamelCase_ : int):
        """Reassemble tokens into a string: specials keep their literal text,
        byte tokens are collected into a byte string decoded as UTF-8
        (undecodable sequences are silently dropped via errors="ignore")."""
        __UpperCAmelCase : str = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                # NOTE(review): looks up special_tokens_decoder although the
                # condition tests added_tokens_decoder — confirm upstream.
                __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                __UpperCAmelCase : Optional[int] = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                __UpperCAmelCase : Optional[Any] = token.encode("utf-8")
            else:
                # Plain byte token: recover the raw byte value.
                __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)])
            bstring += tok_string
        __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore")
        return string

    def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
        """No vocabulary file exists for a byte tokenizer; nothing to save."""
        return ()
77
1
"""GPT-Neo model configuration, two torch helper functions used by the ONNX
export, and the ONNX export configuration.

NOTE(review): identifiers are machine-mangled (``a__``, ``A``,
``UpperCamelCase_``, ``__UpperCAmelCase``); in particular ``A`` is assigned
twice below (logger then URL map — the second shadows the first) and all
three class attributes share the name ``lowercase_``.  Comments describe
intent inferred from the real names still used inside the bodies.
"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging

A = logging.get_logger(__name__)

A = {
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class a__ ( __magic_name__ ):
    lowercase_ = "gpt_neo"
    lowercase_ = ["past_key_values"]
    # Maps standard attribute names onto this config's field names.
    lowercase_ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self : str , UpperCamelCase_ : str=50257 , UpperCamelCase_ : Optional[int]=2048 , UpperCamelCase_ : Dict=2048 , UpperCamelCase_ : int=24 , UpperCamelCase_ : int=[[["global", "local"], 12]] , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : str=None , UpperCamelCase_ : int=256 , UpperCamelCase_ : str="gelu_new" , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : Dict=0.0 , UpperCamelCase_ : Tuple=0.0 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Dict=1e-5 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[Any]=50256 , UpperCamelCase_ : Optional[Any]=50256 , **UpperCamelCase_ : str , ):
        """Store hyperparameters and expand per-layer attention types;
        validates that attention_types expands to exactly num_layers entries."""
        __UpperCAmelCase : Tuple = vocab_size
        __UpperCAmelCase : str = max_position_embeddings
        __UpperCAmelCase : Any = hidden_size
        __UpperCAmelCase : Any = num_layers
        __UpperCAmelCase : int = num_heads
        __UpperCAmelCase : List[str] = intermediate_size
        __UpperCAmelCase : Union[str, Any] = window_size
        __UpperCAmelCase : Tuple = activation_function
        __UpperCAmelCase : List[str] = resid_dropout
        __UpperCAmelCase : Optional[int] = embed_dropout
        __UpperCAmelCase : Tuple = attention_dropout
        __UpperCAmelCase : Optional[Any] = classifier_dropout
        __UpperCAmelCase : int = layer_norm_epsilon
        __UpperCAmelCase : Any = initializer_range
        __UpperCAmelCase : Union[str, Any] = use_cache
        __UpperCAmelCase : Union[str, Any] = bos_token_id
        __UpperCAmelCase : List[Any] = eos_token_id
        __UpperCAmelCase : Union[str, Any] = attention_types
        # Expand [["global","local"], 12] -> ["global","local", ...] per layer.
        __UpperCAmelCase : Dict = self.expand_attention_types_params(UpperCamelCase_)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                F"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)

    @staticmethod
    def a_ ( UpperCamelCase_ : Any):
        """Expand [[types, count], ...] into a flat per-layer list of types."""
        __UpperCAmelCase : Optional[int] = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
    """ONNX-exportable replacement for torch.Tensor.unfold along `dimension`
    with the given window `size` and `step` — builds the windows with
    arange-based gather indexing, then permutes the window axis to the end."""
    import torch

    __UpperCAmelCase : Any = input.size()
    __UpperCAmelCase : Dict = len(UpperCamelCase )
    __UpperCAmelCase : Any = shape[dimension]
    # Window start offsets along the target dimension.
    __UpperCAmelCase : List[Any] = torch.arange(0 , UpperCamelCase , UpperCamelCase )
    __UpperCAmelCase : Optional[Any] = torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1
    # (num_windows, size) index matrix: each row is one window's indices.
    __UpperCAmelCase : str = torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None]
    __UpperCAmelCase : List[str] = [slice(UpperCamelCase )] * rank
    __UpperCAmelCase : List[Any] = indices
    __UpperCAmelCase : Dict = input[s]
    # Move the in-window axis to the last position, like Tensor.unfold does.
    __UpperCAmelCase : List[str] = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(UpperCamelCase )


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    """Return the largest divisor of the second argument that is < the first
    bound, together with the corresponding quotient (both torch-traceable)."""
    import torch

    __UpperCAmelCase : str = torch.arange(1 , UpperCamelCase )
    __UpperCAmelCase : Union[str, Any] = torch.remainder(UpperCamelCase , UpperCamelCase )
    __UpperCAmelCase : Optional[int] = remainders == 0
    __UpperCAmelCase : Optional[Any] = candidates[divisor_indices]
    __UpperCAmelCase : List[str] = torch.max(UpperCamelCase )
    return largest_divisor, torch.div(UpperCamelCase , UpperCamelCase , rounding_mode="floor" )


class a__ ( __magic_name__ ):
    @property
    def a_ ( self : Any):
        """ONNX input spec: input_ids (+ past_key_values when use_past),
        attention_mask with dynamic batch/sequence axes."""
        __UpperCAmelCase : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(UpperCamelCase_ , direction="inputs")
            # With a cache present, the mask spans past + current tokens.
            __UpperCAmelCase : Dict = {0: "batch", 1: "past_sequence + sequence"}
        else:
            __UpperCAmelCase : List[Any] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def a_ ( self : int):
        """Number of attention heads, read from the wrapped model config."""
        return self._config.num_heads

    def a_ ( self : Dict , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
        """Build dummy inputs for ONNX export, adding zero-filled
        past_key_values (and an extended attention_mask) when use_past."""
        __UpperCAmelCase : int = super(UpperCamelCase_ , self).generate_dummy_inputs(
            UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_)
        # We need to order the input in the way they appears in the forward()
        __UpperCAmelCase : str = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                __UpperCAmelCase , __UpperCAmelCase : str = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                __UpperCAmelCase : str = seqlen + 2
                # Shape of each past key/value: (batch, heads, past_len, head_dim).
                __UpperCAmelCase : int = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __UpperCAmelCase : Dict = [
                    (torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_)) for _ in range(self.num_layers)
                ]
        __UpperCAmelCase : Dict = common_inputs["attention_mask"]
        if self.use_past:
            __UpperCAmelCase : str = ordered_inputs["attention_mask"].dtype
            # Extend the mask with ones covering the dummy past positions.
            __UpperCAmelCase : List[Any] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_)] , dim=1)
        return ordered_inputs

    @property
    def a_ ( self : Any):
        """Default ONNX opset version for this export."""
        return 13
77
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): __UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor 
__UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
1
"""simple docstring""" import requests A = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=""" def _UpperCamelCase ( UpperCamelCase ) -> None: """simple docstring""" # fetching a list of articles in json format __UpperCAmelCase : Union[str, Any] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"] , 1 ): print(f"{i}.) {article['title']}" ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
77
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... 
return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = 
spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError("String lengths must match!" ) __UpperCAmelCase : str = 0 for chara, chara in zip(UpperCamelCase , UpperCamelCase ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
77
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = 
self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] 
= self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
1
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ProphetNetTokenizer lowercase_ = False def a_ ( self : List[Any]): """simple docstring""" super().setUp() __UpperCAmelCase : Optional[int] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def a_ ( self : List[str] , UpperCamelCase_ : List[Any]): """simple docstring""" __UpperCAmelCase : Optional[int] = "UNwant\u00E9d,running" __UpperCAmelCase : Dict = "unwanted, running" return input_text, output_text def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(UpperCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [9, 6, 7, 12, 10, 11]) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : str = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"]) def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Dict = BasicTokenizer(do_lower_case=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
") , ["hello", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Any = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Union[str, Any] = BasicTokenizer(do_lower_case=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"]) self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"]) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"]) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"]) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
") , ["HaLLo", "!", "how", "Are", "yoU", "?"]) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : List[str] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __UpperCAmelCase : Optional[int] = {} for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Tuple = i __UpperCAmelCase : Optional[Any] = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize("") , []) self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"]) @require_torch def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") __UpperCAmelCase : Any = ["A long paragraph for summarization.", "Another paragraph for summarization."] __UpperCAmelCase : Dict = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __UpperCAmelCase : Union[str, Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt") self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = list(batch.input_ids.numpy()[0]) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) def a_ ( self : Tuple): """simple docstring""" self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) 
self.assertFalse(_is_whitespace("-")) def a_ ( self : List[str]): """simple docstring""" self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def a_ ( self : Tuple): """simple docstring""" self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) @slow def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") __UpperCAmelCase : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase_) __UpperCAmelCase : List[str] = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
77
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return 
DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: 
{num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or 
TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a__ : def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=13 , UpperCamelCase_ : List[str]=7 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : str=True , UpperCamelCase_ : Any=99 , UpperCamelCase_ : List[Any]=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Union[str, Any]=37 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Any=None , ): """simple docstring""" __UpperCAmelCase : Dict = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : int = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : int = attention_probs_dropout_prob __UpperCAmelCase : 
Dict = max_position_embeddings __UpperCAmelCase : Tuple = type_vocab_size __UpperCAmelCase : Optional[int] = type_sequence_label_size __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : Dict = num_choices __UpperCAmelCase : Dict = scope __UpperCAmelCase : List[str] = self.vocab_size - 1 def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices) __UpperCAmelCase : Dict = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __UpperCAmelCase : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def a_ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , *UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : int = OpenAIGPTModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_) __UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_) 
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def a_ ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , *UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Any = OpenAIGPTLMHeadModel(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def a_ ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , *UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Any = OpenAIGPTDoubleHeadsModel(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def a_ ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , *UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : List[str] = OpenAIGPTForSequenceClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) __UpperCAmelCase : List[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : 
Union[str, Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : str = config_and_inputs __UpperCAmelCase : Any = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class a__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowercase_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowercase_ = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def a_ ( self : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : str): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def a_ ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]=False): """simple docstring""" __UpperCAmelCase : Optional[Any] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __UpperCAmelCase : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = inputs_dict["labels"] __UpperCAmelCase : Any = inputs_dict["labels"] __UpperCAmelCase : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase_ , ) __UpperCAmelCase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_) return inputs_dict def a_ ( self : int): """simple docstring""" __UpperCAmelCase : str = OpenAIGPTModelTester(self) __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37) def a_ ( self : Dict): """simple docstring""" self.config_tester.run_common_tests() def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase_) @slow def a_ ( self : Tuple): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = OpenAIGPTModel.from_pretrained(UpperCamelCase_) self.assertIsNotNone(UpperCamelCase_) @require_torch class a__ ( unittest.TestCase ): @slow def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt") model.to(UpperCamelCase_) __UpperCAmelCase : List[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase_) # the president is __UpperCAmelCase : List[str] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the __UpperCAmelCase : str = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_)
77
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
1
"""simple docstring""" from __future__ import annotations A = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] A = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def _UpperCamelCase ( UpperCamelCase ) -> list[float]: """simple docstring""" __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : int = len(UpperCamelCase ) for i in range(UpperCamelCase ): __UpperCAmelCase : float = -1 for j in range(i + 1 , UpperCamelCase ): if arr[i] < arr[j]: __UpperCAmelCase : int = arr[j] break result.append(UpperCamelCase ) return result def _UpperCamelCase ( UpperCamelCase ) -> list[float]: """simple docstring""" __UpperCAmelCase : int = [] for i, outer in enumerate(UpperCamelCase ): __UpperCAmelCase : float = -1 for inner in arr[i + 1 :]: if outer < inner: __UpperCAmelCase : Optional[int] = inner break result.append(UpperCamelCase ) return result def _UpperCamelCase ( UpperCamelCase ) -> list[float]: """simple docstring""" __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : list[float] = [] __UpperCAmelCase : list[float] = [-1] * arr_size for index in reversed(range(UpperCamelCase ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __UpperCAmelCase : Optional[int] = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) A = ( """from __main__ import arr, next_greatest_element_slow, """ """next_greatest_element_fast, next_greatest_element""" ) print( """next_greatest_element_slow():""", timeit("""next_greatest_element_slow(arr)""", setup=setup), ) print( """next_greatest_element_fast():""", timeit("""next_greatest_element_fast(arr)""", setup=setup), ) print( """ next_greatest_element():""", timeit("""next_greatest_element(arr)""", setup=setup), )
77
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if 
the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: """simple docstring""" if index == r: for j in range(UpperCamelCase ): print(data[j] , end=" " ) print(" " ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __UpperCAmelCase : Tuple = arr[i] combination_util(UpperCamelCase , UpperCamelCase , UpperCamelCase , index + 1 , UpperCamelCase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: """simple docstring""" # A temporary array to store all combination one by one __UpperCAmelCase : Optional[Any] = [0] * r # Print all combination using temporary array 'data[]' combination_util(UpperCamelCase , UpperCamelCase , UpperCamelCase , 0 , UpperCamelCase , 0 ) if __name__ == "__main__": # Driver code to check the function above A = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
77
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in 
range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : 
List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: A = None A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } A = { """google/bigbird-roberta-base""": 4_096, """google/bigbird-roberta-large""": 4_096, """google/bigbird-base-trivia-itc""": 4_096, } A = """▁""" class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = BigBirdTokenizer lowercase_ = ["input_ids", "attention_mask"] lowercase_ = [] def __init__( self : List[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : Optional[int]="</s>" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : str="[SEP]" , 
UpperCamelCase_ : Optional[Any]="[MASK]" , UpperCamelCase_ : Union[str, Any]="[CLS]" , **UpperCamelCase_ : Tuple , ): """simple docstring""" __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else bos_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token __UpperCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else cls_token __UpperCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Any = vocab_file __UpperCAmelCase : List[Any] = False if not self.vocab_file else True def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model.") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_)) + [1] return [1] + ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = 
None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = 
TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
77
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class a__ ( __magic_name__ ): def __init__( self : Tuple , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : int = data def __iter__( self : Optional[int]): """simple docstring""" for element in self.data: yield element def _UpperCamelCase ( UpperCamelCase=True ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : str = Accelerator(even_batches=UpperCamelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = False ) -> Tuple: """simple docstring""" if iterable: __UpperCAmelCase : Dict = DummyIterableDataset(torch.as_tensor(range(UpperCamelCase ) ) ) else: __UpperCAmelCase : List[Any] = TensorDataset(torch.as_tensor(range(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = accelerator.prepare(UpperCamelCase ) return dl def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> str: 
"""simple docstring""" __UpperCAmelCase : List[Any] = create_dataloader(accelerator=UpperCamelCase , dataset_size=UpperCamelCase , batch_size=UpperCamelCase ) __UpperCAmelCase : List[str] = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def _UpperCamelCase ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Optional[Any] = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( UpperCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( UpperCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Any = create_accelerator(even_batches=UpperCamelCase ) verify_dataloader_batch_sizes( UpperCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( UpperCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[Any] = create_accelerator(even_batches=UpperCamelCase ) __UpperCAmelCase : Dict = torch.nn.Linear(1 , 1 ) __UpperCAmelCase : Tuple = accelerator.prepare(UpperCamelCase ) __UpperCAmelCase : Any = create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 ) __UpperCAmelCase : Optional[int] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(UpperCamelCase ): __UpperCAmelCase : Tuple = 
ddp_model(batch[0].float() ) __UpperCAmelCase : List[str] = output.sum() loss.backward() batch_idxs.append(UpperCamelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" with warnings.catch_warnings(record=UpperCamelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , UpperCamelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def _UpperCamelCase ( ) -> Tuple: """simple docstring""" __UpperCAmelCase : int = True __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Any = create_accelerator(even_batches=UpperCamelCase ) __UpperCAmelCase : int = torch.nn.Linear(1 , 1 ) __UpperCAmelCase : int = accelerator.prepare(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 ) __UpperCAmelCase : Any = create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase ): __UpperCAmelCase : Tuple = train_dl.batch_sampler.even_batches __UpperCAmelCase : Optional[int] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def _UpperCamelCase ( ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Any = False __UpperCAmelCase : str = create_accelerator(even_batches=UpperCamelCase ) __UpperCAmelCase : Any = torch.nn.Linear(1 , 1 ) __UpperCAmelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase ) create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase ) 
__UpperCAmelCase : Union[str, Any] = create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase ): __UpperCAmelCase : Any = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def _UpperCamelCase ( ) -> Tuple: """simple docstring""" __UpperCAmelCase : Optional[int] = create_accelerator() __UpperCAmelCase : str = torch.nn.Linear(1 , 1 ) __UpperCAmelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase ) create_dataloader(UpperCamelCase , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase ) with warnings.catch_warnings(record=UpperCamelCase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase ): pass assert issubclass(w[-1].category , UpperCamelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : int = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with 
non DDP distributed raises warning" ) __UpperCAmelCase : Optional[Any] = accelerator.state.distributed_type __UpperCAmelCase : str = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = original_state if __name__ == "__main__": main()
77
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
1
"""simple docstring""" from __future__ import annotations def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: """simple docstring""" # Checks if the entire collection has been sorted if len(UpperCamelCase ) <= 1 or n <= 1: return insert_next(UpperCamelCase , n - 1 ) rec_insertion_sort(UpperCamelCase , n - 1 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Dict: """simple docstring""" # Checks order between adjacent elements if index >= len(UpperCamelCase ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order __UpperCAmelCase , __UpperCAmelCase : Dict = ( collection[index], collection[index - 1], ) insert_next(UpperCamelCase , index + 1 ) if __name__ == "__main__": A = input("""Enter integers separated by spaces: """) A = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
77
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
1
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers A = """3""" print("""Python version:""", sys.version) print("""transformers version:""", transformers.__version__) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) print("""NCCL version:""", torch.cuda.nccl.version()) except ImportError: print("""Torch version:""", None) try: import deepspeed print("""DeepSpeed version:""", deepspeed.__version__) except ImportError: print("""DeepSpeed version:""", None) try: import tensorflow as tf print("""TensorFlow version:""", tf.__version__) print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU"""))) print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU"""))) except ImportError: print("""TensorFlow version:""", None)
77
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value 
in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution 
in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) 
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , 
self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
1
"""simple docstring""" def _UpperCamelCase ( ) -> int: """simple docstring""" return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(UpperCamelCase , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(f'''{solution() = }''')
77
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL A = logging.get_logger(__name__) def _UpperCamelCase ( UpperCamelCase ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCamelCase ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class a__ ( __magic_name__ ): lowercase_ = ["pixel_values"] def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : int , ): """simple docstring""" super().__init__(**UpperCamelCase_) __UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 224} __UpperCAmelCase : Any = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_) __UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 224, "width": 224} __UpperCAmelCase : Tuple = 
get_size_dict(UpperCamelCase_ , param_name="crop_size") __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : str = do_center_crop __UpperCAmelCase : str = crop_size __UpperCAmelCase : List[Any] = resample __UpperCAmelCase : List[Any] = do_rescale __UpperCAmelCase : int = rescale_factor __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def a_ ( self : Union[str, Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ): """simple docstring""" __UpperCAmelCase : int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_) if "shortest_edge" in size: __UpperCAmelCase : Optional[int] = get_resize_output_image_size(UpperCamelCase_ , size["shortest_edge"] , default_to_square=UpperCamelCase_) elif "height" in size and "width" in size: __UpperCAmelCase : List[str] = (size["height"], size["width"]) else: raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}") return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ): """simple docstring""" __UpperCAmelCase : int = get_size_dict(UpperCamelCase_) if "height" not in size or "width" not in size: raise ValueError(F"Size must have 'height' and 'width' as keys. 
Got {size.keys()}") return center_crop(UpperCamelCase_ , size=(size["height"], size["width"]) , data_format=UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ): """simple docstring""" return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[str] , ): """simple docstring""" return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : Optional[int] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
__UpperCAmelCase : Any = to_numpy_array(UpperCamelCase_) if do_resize: __UpperCAmelCase : Optional[Any] = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_) if do_center_crop: __UpperCAmelCase : Dict = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_) if do_rescale: __UpperCAmelCase : List[Any] = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_) if do_normalize: __UpperCAmelCase : Optional[Any] = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_) __UpperCAmelCase : Optional[int] = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_) return image def a_ ( self : Optional[Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Dict , ): """simple docstring""" __UpperCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : List[str] = resample if resample is not None else self.resample __UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Optional[int] = image_std if 
image_std is not None else self.image_std __UpperCAmelCase : Tuple = size if size is not None else self.size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_) __UpperCAmelCase : int = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase_ , param_name="crop_size") if not valid_images(UpperCamelCase_): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") __UpperCAmelCase : Tuple = make_batched(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = [ [ self._preprocess_image( image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , ) for img in video ] for video in videos ] __UpperCAmelCase : Optional[Any] = {"pixel_values": videos} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_)
77
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
1
"""simple docstring""" import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = WavaVecaPhonemeCTCTokenizer lowercase_ = False def a_ ( self : Optional[Any]): """simple docstring""" super().setUp() __UpperCAmelCase : str = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" ") __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_)))) __UpperCAmelCase : Any = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as fp: fp.write(json.dumps(UpperCamelCase_) + "\n") def a_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Any=20 , UpperCamelCase_ : Optional[int]=5): """simple docstring""" __UpperCAmelCase : Optional[int] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_)) for i in range(len(UpperCamelCase_))] __UpperCAmelCase : List[str] = list(filter(lambda UpperCamelCase_: [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase_) , UpperCamelCase_)) if max_length is not None and len(UpperCamelCase_) > max_length: __UpperCAmelCase : Optional[int] = toks[:max_length] if min_length is not None and len(UpperCamelCase_) < min_length and len(UpperCamelCase_) > 0: while len(UpperCamelCase_) < min_length: __UpperCAmelCase : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __UpperCAmelCase : Tuple = [t[0] for t in toks] # Ensure consistency __UpperCAmelCase : Union[str, Any] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_) if " " not in output_txt and len(UpperCamelCase_) > 1: __UpperCAmelCase : Optional[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_) ) if 
with_prefix_space: __UpperCAmelCase : str = " " + output_txt __UpperCAmelCase : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_) return output_txt, output_ids def a_ ( self : Optional[int] , **UpperCamelCase_ : Dict): """simple docstring""" kwargs.update(self.special_tokens_map) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") # check adding a single token tokenizer.add_tokens("xxx") __UpperCAmelCase : Dict = tokenizer("m xxx ɪ" , do_phonemize=UpperCamelCase_).input_ids self.assertEqual(UpperCamelCase_ , [13, 392, 17]) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"]) __UpperCAmelCase : List[Any] = tokenizer("m aaa ɪ ccc" , do_phonemize=UpperCamelCase_).input_ids self.assertEqual(UpperCamelCase_ , [13, 393, 17, 395]) # aaa and ccc should be after xxx and 2 after aaa __UpperCAmelCase : int = tokenizer("maɪ c" , do_phonemize=UpperCamelCase_).input_ids self.assertEqual(UpperCamelCase_ , [3, 200]) # mai should be <unk> (=3) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") __UpperCAmelCase : int = "Hello how are you" __UpperCAmelCase : Optional[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") self.assertEqual(UpperCamelCase_ , "h ə l oʊ h aʊ ɑːɹ j uː") def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") __UpperCAmelCase : int = "Hello how are you" __UpperCAmelCase : Optional[int] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") self.assertEqual(tokenizer(UpperCamelCase_).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_).input_ids) def a_ ( self : int): 
"""simple docstring""" __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") __UpperCAmelCase : Dict = "Hello how are you" __UpperCAmelCase : List[str] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") __UpperCAmelCase : List[Any] = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") __UpperCAmelCase : int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] __UpperCAmelCase : Any = tokenizer.decode(sample_ids[0]) __UpperCAmelCase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_) self.assertEqual(UpperCamelCase_ , batch_tokens[0]) self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|") tokenizer.add_tokens("|") __UpperCAmelCase : Optional[Any] = "Hello how are you" __UpperCAmelCase : Dict = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") self.assertEqual(UpperCamelCase_ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |") def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|") tokenizer.add_tokens("|") __UpperCAmelCase : int = "Hello how are you" __UpperCAmelCase : List[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") self.assertEqual(tokenizer(UpperCamelCase_).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_).input_ids) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : int = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , 
word_delimiter_token="|") tokenizer.add_tokens("|") # fmt: off __UpperCAmelCase : Optional[int] = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter __UpperCAmelCase : List[str] = tokenizer.decode(sample_ids[0]) __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_) self.assertEqual(UpperCamelCase_ , batch_tokens[0]) self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"]) # decode with no word_del_token filter __UpperCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase_) __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , batch_tokens[0]) self.assertEqual(UpperCamelCase_ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"]) def a_ ( self : str): """simple docstring""" __UpperCAmelCase : int = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|") tokenizer.add_tokens("|") __UpperCAmelCase : List[str] = "Hello how are you" __UpperCAmelCase : str = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") __UpperCAmelCase : Dict = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids , filter_word_delimiter_token=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|") tokenizer.add_tokens("|") __UpperCAmelCase : List[str] = "Hello how are you" __UpperCAmelCase : List[str] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang="en-us") __UpperCAmelCase : Tuple = tokenizer.decode(tokenizer(UpperCamelCase_).input_ids , 
filter_word_delimiter_token=UpperCamelCase_) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip() , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Any = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=UpperCamelCase_) __UpperCAmelCase : Any = "Hello how are you" __UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , phonemizer_lang="en-us").input_ids __UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , phonemizer_lang="fr-fr").input_ids self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = tokenizer.decode(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer.decode(UpperCamelCase_) self.assertEqual(UpperCamelCase_ , "h ə l oʊ h aʊ ɑːɹ j uː") self.assertEqual(UpperCamelCase_ , "ɛ l o h aʊ a ʁ j u") def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") __UpperCAmelCase : List[Any] = "Hello how Are you" __UpperCAmelCase : Tuple = "hello how are you" __UpperCAmelCase : str = tokenizer(UpperCamelCase_).input_ids __UpperCAmelCase : Dict = tokenizer(UpperCamelCase_).input_ids self.assertEqual(UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") tokenizer.add_tokens(["!", "?"]) tokenizer.add_special_tokens({"cls_token": "$$$"}) # fmt: off __UpperCAmelCase : Any = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase_) self.assertEqual(UpperCamelCase_ , ["k s ɾ ɾ l ɭʲ!?!? 
$$$", "j ð s j ð s oːɹ $$$"]) @staticmethod def a_ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = [d[key] for d in offsets] return retrieved_list def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_tokenizer(word_delimiter_token="|") tokenizer.add_tokens("|") # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" __UpperCAmelCase : str = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on __UpperCAmelCase : Optional[int] = tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys()) , 2) self.assertTrue("text" in outputs) self.assertTrue("char_offsets" in outputs) self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_)) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char")) , outputs.text) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "char") , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "start_offset") , [0, 1, 4, 7, 9, 11, 12, 15, 16]) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "end_offset") , [1, 4, 6, 9, 10, 12, 15, 16, 17]) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Any = self.get_tokenizer(word_delimiter_token="|") def check_list_tuples_equal(UpperCamelCase_ 
: Optional[int] , UpperCamelCase_ : List[Any]): self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_)) self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase_)) # transform list to ModelOutput __UpperCAmelCase : Optional[Any] = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]}) self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"]) def recursive_check(UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any]): if isinstance(UpperCamelCase_ , UpperCamelCase_): [recursive_check(UpperCamelCase_ , UpperCamelCase_) for la, la in zip(UpperCamelCase_ , UpperCamelCase_)] self.assertEqual(UpperCamelCase_ , UpperCamelCase_) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"]) # fmt: off __UpperCAmelCase : Union[str, Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char __UpperCAmelCase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_) __UpperCAmelCase : Any = [tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_) for ids in sample_ids] check_list_tuples_equal(UpperCamelCase_ , UpperCamelCase_) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes") def a_ ( self : Any): """simple docstring""" pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes") def a_ ( self : Tuple): """simple docstring""" pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency") def a_ ( self : Any): """simple docstring""" pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing") def a_ ( self : int): """simple docstring""" pass def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=UpperCamelCase_) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}"): __UpperCAmelCase : Union[str, Any] = tokenizer.vocab_size __UpperCAmelCase : Any = len(UpperCamelCase_) self.assertNotEqual(UpperCamelCase_ , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __UpperCAmelCase : Tuple = ["aaaaa bbbbbb", "cccccccccdddddddd"] __UpperCAmelCase : Optional[Any] = tokenizer.add_tokens(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer.vocab_size __UpperCAmelCase : List[str] = len(UpperCamelCase_) self.assertNotEqual(UpperCamelCase_ , 0) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_)) self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_)) __UpperCAmelCase : List[str] 
= tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=UpperCamelCase_) self.assertGreaterEqual(len(UpperCamelCase_) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) __UpperCAmelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} __UpperCAmelCase : int = tokenizer.add_special_tokens(UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.vocab_size __UpperCAmelCase : Optional[int] = len(UpperCamelCase_) self.assertNotEqual(UpperCamelCase_ , 0) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_)) self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_)) __UpperCAmelCase : List[str] = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=UpperCamelCase_) self.assertGreaterEqual(len(UpperCamelCase_) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def a_ ( self : List[str]): """simple docstring""" pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.") def a_ ( self : Tuple): """simple docstring""" pass def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Tuple = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}"): __UpperCAmelCase : Optional[int] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] __UpperCAmelCase : List[str] = 
tokenizer.convert_tokens_to_string(UpperCamelCase_) self.assertIsInstance(output["text"] , UpperCamelCase_)
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration A = """facebook/wmt19-en-de""" A = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model A = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) A = FSMTForConditionalGeneration(config) print(f'''num of params {tiny_model.num_parameters()}''') # Test A = tokenizer(["""Making tiny model"""], return_tensors="""pt""") A = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save A = """tiny-wmt19-en-de""" tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-de
77
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) 
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed __UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : 
Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
1
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } A = { """AI-Sweden/gpt-sw3-126m""": 2_048, """AI-Sweden/gpt-sw3-350m""": 2_048, """AI-Sweden/gpt-sw3-1.6b""": 2_048, """AI-Sweden/gpt-sw3-6.7b""": 2_048, """AI-Sweden/gpt-sw3-20b""": 2_048, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Dict=False , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase : Dict = kwargs.get("name_or_path") if name_or_path is 
None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored") __UpperCAmelCase : Optional[int] = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase : str = "<|endoftext|>" if eos_token is None else eos_token __UpperCAmelCase : Optional[Any] = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase : Dict = unk_token if pad_token is None else pad_token __UpperCAmelCase : Any = eos_token if bos_token is None else bos_token else: __UpperCAmelCase : List[Any] = "<pad>" if pad_token is None else pad_token __UpperCAmelCase : List[str] = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = do_lower_case __UpperCAmelCase : List[str] = remove_space __UpperCAmelCase : Any = keep_accents __UpperCAmelCase : int = vocab_file __UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __UpperCAmelCase : Dict = re.compile( F"[{''.join(map(UpperCamelCase_ , list(range(0 , 9)) + list(range(11 , 32)) + list(range(127 , 160)) + [160, 173, 8203]))}]") def __getstate__( self : Any): """simple docstring""" __UpperCAmelCase : Tuple = self.__dict__.copy() __UpperCAmelCase : Optional[Any] = None return state def __setstate__( self : Union[str, Any] , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a_ ( self : Any): """simple docstring""" return len(self.sp_model) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : List[Any] = self.non_printing_characters_re.sub("" , UpperCamelCase_) # Normalize whitespaces __UpperCAmelCase : List[Any] = "".join([char if char not in self.whitespaces else " " for char in text]) # NFC Unicode normalization __UpperCAmelCase : Any = unicodedata.normalize("NFC" , UpperCamelCase_) return text def a_ ( self : Dict , UpperCamelCase_ : str , **UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : List[str] = self.preprocess_text(UpperCamelCase_) return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Union[str, Any] , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.PieceToId(UpperCamelCase_) def a_ ( self : List[str] , UpperCamelCase_ : int): """simple docstring""" return self.sp_model.IdToPiece(UpperCamelCase_) @staticmethod def a_ ( UpperCamelCase_ : str): """simple docstring""" return out_string def a_ ( self : int , UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : List[str] = [] 
__UpperCAmelCase : Tuple = "" __UpperCAmelCase : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : int = True __UpperCAmelCase : Dict = [] else: current_sub_tokens.append(UpperCamelCase_) __UpperCAmelCase : List[Any] = False out_string += self.sp_model.decode(UpperCamelCase_) return out_string def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,) def a_ ( self : Any , UpperCamelCase_ : Union[str, List[str]] , UpperCamelCase_ : Union[str, bool] = False): """simple docstring""" if isinstance(UpperCamelCase_ , UpperCamelCase_): __UpperCAmelCase : Any = self.preprocess_text(UpperCamelCase_) __UpperCAmelCase : Any = self.sp_model.encode(UpperCamelCase_) else: __UpperCAmelCase : Dict = [self.preprocess_text(UpperCamelCase_) for t 
in text] __UpperCAmelCase : List[Any] = self.sp_model.encode(UpperCamelCase_) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase : Optional[Any] = torch.tensor(UpperCamelCase_) return token_ids def a_ ( self : int , UpperCamelCase_ : Union[int, List[int]]): """simple docstring""" return self.sp_model.decode(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : "Conversation"): """simple docstring""" __UpperCAmelCase : int = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()] __UpperCAmelCase : Optional[Any] = ( F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(UpperCamelCase_) + F"{self.bos_token}Bot:" ) return self.encode(text=UpperCamelCase_)
77
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : List[Any] = [1] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = 0, 0, 0 __UpperCAmelCase : Tuple = ugly_nums[ia] * 2 __UpperCAmelCase : List[Any] = ugly_nums[ia] * 3 __UpperCAmelCase : Optional[int] = ugly_nums[ia] * 5 for _ in range(1 , UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = min(UpperCamelCase , UpperCamelCase , UpperCamelCase ) ugly_nums.append(UpperCamelCase ) if next_num == next_a: ia += 1 __UpperCAmelCase : Dict = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCAmelCase : Optional[int] = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCAmelCase : Tuple = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(200) = }''')
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def _UpperCamelCase ( UpperCamelCase=None ) -> int: """simple docstring""" if subparsers is not None: __UpperCAmelCase : List[str] = subparsers.add_parser("test" ) else: __UpperCAmelCase : Dict = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=UpperCamelCase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." 
) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase ) return parser def _UpperCamelCase ( UpperCamelCase ) -> str: """simple docstring""" __UpperCAmelCase : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: __UpperCAmelCase : List[str] = script_name else: __UpperCAmelCase : int = f"--config_file={args.config_file} {script_name}" __UpperCAmelCase : Dict = ["accelerate-launch"] + test_args.split() __UpperCAmelCase : Tuple = execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : List[Any] = test_command_parser() __UpperCAmelCase : Any = parser.parse_args() test_command(UpperCamelCase ) if __name__ == "__main__": main()
77
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" __UpperCAmelCase : int = len(UpperCamelCase ) __UpperCAmelCase : int = len(UpperCamelCase ) __UpperCAmelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) __UpperCAmelCase : list = [] for char_count in range(UpperCamelCase ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(UpperCamelCase ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
77
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : 
Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
1
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=5 ) -> Optional[Any]: """simple docstring""" # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count("<mask>" ) == 1 __UpperCAmelCase : Union[str, Any] = torch.tensor(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ).unsqueeze(0 ) # Batch size 1 __UpperCAmelCase : Tuple = model(UpperCamelCase )[0] # The last hidden-state is the first element of the output tuple __UpperCAmelCase : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __UpperCAmelCase : str = logits[0, masked_index, :] __UpperCAmelCase : List[str] = logits.softmax(dim=0 ) __UpperCAmelCase , __UpperCAmelCase : int = prob.topk(k=UpperCamelCase , dim=0 ) __UpperCAmelCase : Optional[Any] = " ".join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(UpperCamelCase ) )] ) __UpperCAmelCase : Any = tokenizer.mask_token __UpperCAmelCase : List[Any] = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ): __UpperCAmelCase : Dict = predicted_token_bpe.replace("\u2581" , " " ) if " {0}".format(UpperCamelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(" {0}".format(UpperCamelCase ) , UpperCamelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(UpperCamelCase , UpperCamelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs A = CamembertTokenizer.from_pretrained("""camembert-base""") A = CamembertForMaskedLM.from_pretrained("""camembert-base""") model.eval() A = """Le camembert est <mask> :)""" print(fill_mask(masked_input, model, tokenizer, topk=3))
77
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , 
num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] 
= mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
1
"""Pigeonhole sort: counting-style sort for integer lists with a small range."""


def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort.

    Efficient when (max - min) is comparable to len(a).

    Args:
        a: mutable list of integers; sorted in place.

    Raises:
        AssertionError: if any element is not an integer.
    """
    if not a:  # empty list: nothing to do (min()/max() would raise)
        return
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    # size is difference of max and min values plus one
    size = max_val - min_val + 1
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes (count occurrences of each value).
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    """Demo entry point: sort a sample list and print it."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # join() requires strings, so convert each int before joining
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
77
"""Byte-level (ByT5-style) tokenizer: tokens are raw UTF-8 bytes plus a small
set of special tokens and optional sentinel ("extra id") tokens.

NOTE(review): identifiers in this file are obfuscation-damaged (`a__`,
`UpperCamelCase_`, `__UpperCAmelCase`); the bodies reference the intended
original names (e.g. `extra_ids`, `token_ids`), which these assignments do
not actually bind — confirm against the upstream source before running.
"""
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


A = logging.get_logger(__name__)


class a__ ( __magic_name__ ):
    # Names of the tensors the model's forward pass expects.
    lowercase_ = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ):
        """Register the byte vocabulary plus special and sentinel tokens.

        NOTE(review): positionally the obfuscated parameters are eos_token,
        unk_token, pad_token, extra_ids, additional_special_tokens.
        """
        # When no additional special tokens were supplied, generate the
        # <extra_id_i> sentinels ourselves.
        if extra_ids > 0 and additional_special_tokens is None:
            __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        # Wrap bare-string tokens in AddedToken so stripping behaviour is explicit.
        __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token
        __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token
        __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token
        super().__init__(
            eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
        __UpperCAmelCase : List[str] = extra_ids
        __UpperCAmelCase : int = 2**8  # utf is 8 bits
        # define special tokens dict (token string -> fixed low id)
        __UpperCAmelCase : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __UpperCAmelCase : Any = len(self.special_tokens_encoder)
        __UpperCAmelCase : List[Any] = len(UpperCamelCase_)
        for i, token in enumerate(UpperCamelCase_):
            # Sentinel tokens are placed at the top of the id space.
            __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n
        # Inverse map: id -> special token string.
        __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def a_ ( self : List[Any]):
        """Total vocabulary size: 256 bytes + special tokens + extra ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False):
        """Return a 0/1 mask with 1 marking special tokens (each appended EOS).

        NOTE(review): the super() call passes `token_ids_a` twice — a
        duplicate-keyword SyntaxError in real Python; obfuscation damage.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_)

        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(UpperCamelCase_)) + [1]
        return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1]

    def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]):
        """Append EOS unless the sequence already ends with it (then warn)."""
        if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
        """Token type ids: this tokenizer does not use them, so all zeros."""
        __UpperCAmelCase : Dict = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
        """Build model input: `A</s>` or `A</s>B</s>`."""
        __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_)
            return token_ids_a + token_ids_a

    def a_ ( self : List[str] , UpperCamelCase_ : str):
        """Tokenize by splitting the text into its individual UTF-8 bytes."""
        __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")]
        return tokens

    def a_ ( self : Tuple , UpperCamelCase_ : List[Any]):
        """Map one token (special, added, or single byte char) to its id."""
        if token in self.special_tokens_encoder:
            __UpperCAmelCase : Any = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __UpperCAmelCase : int = self.added_tokens_encoder[token]
        elif len(UpperCamelCase_) != 1:
            # a multi-character token cannot be a single byte -> unknown
            __UpperCAmelCase : Optional[Any] = self.unk_token_id
        else:
            # byte ids are shifted up past the special-token range
            __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens
        return token_id

    def a_ ( self : Any , UpperCamelCase_ : List[str]):
        """Map an id back to its token (special token or byte character)."""
        if index in self.special_tokens_decoder:
            __UpperCAmelCase : Any = self.special_tokens_decoder[index]
        else:
            __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens)
        return token

    def a_ ( self : Dict , UpperCamelCase_ : int):
        """Join byte tokens into a string; undecodable bytes are dropped
        (errors="ignore")."""
        __UpperCAmelCase : str = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                __UpperCAmelCase : Optional[int] = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                __UpperCAmelCase : Optional[Any] = token.encode("utf-8")
            else:
                # ordinary byte token: recover the raw byte value
                __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)])
            bstring += tok_string
        __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore")
        return string

    def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
        """A byte vocabulary has no file to save; return an empty tuple."""
        return ()
77
1
"""Fetch the current stock price of a symbol by scraping Yahoo Finance."""
import requests
from bs4 import BeautifulSoup  # was `bsa`, a non-existent module


def stock_price(symbol: str = "AAPL") -> str:
    """Return the quoted price for `symbol` as a string.

    Scrapes in.finance.yahoo.com; the hard-coded CSS class below is tied to
    Yahoo's current page layout and may break if the site changes.

    Raises:
        AttributeError: if the expected price element is not found.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
77
"""Tests for the Flax RegNet model: unit tests for shapes/signatures plus a
slow integration test against a pretrained checkpoint.

NOTE(review): class names are obfuscated to `a__`; the suite below
instantiates `FlaxRegNetModelTester`, which no class here actually binds —
obfuscation damage, confirm against the upstream source.
"""
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class a__ ( unittest.TestCase ):
    # Helper that builds small configs and random inputs for the tests below.
    def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ):
        # NOTE(review): obfuscated parameters are, positionally: parent,
        # batch_size, image_size, num_channels, embeddings_size, hidden_sizes,
        # depths, is_training, use_labels, hidden_act, num_labels, scope.
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : List[str] = batch_size
        __UpperCAmelCase : List[str] = image_size
        __UpperCAmelCase : Tuple = num_channels
        __UpperCAmelCase : Union[str, Any] = embeddings_size
        __UpperCAmelCase : Dict = hidden_sizes
        __UpperCAmelCase : Dict = depths
        __UpperCAmelCase : Tuple = is_training
        __UpperCAmelCase : List[Any] = use_labels
        __UpperCAmelCase : Optional[int] = hidden_act
        __UpperCAmelCase : str = num_labels
        __UpperCAmelCase : Optional[int] = scope
        __UpperCAmelCase : Dict = len(UpperCamelCase_)

    def a_ ( self : Any):
        """Create a random pixel_values batch and a matching config."""
        __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __UpperCAmelCase : Dict = self.get_config()
        return config, pixel_values

    def a_ ( self : Dict):
        """Build a RegNetConfig from this tester's hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels ,
            embeddings_size=self.embeddings_size ,
            hidden_sizes=self.hidden_sizes ,
            depths=self.depths ,
            hidden_act=self.hidden_act ,
            num_labels=self.num_labels ,
            image_size=self.image_size ,
        )

    def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]):
        """Check the base model's last_hidden_state shape."""
        __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_)
        __UpperCAmelCase : Dict = model(UpperCamelCase_)

        # Output shape (b, c, h, w); spatial dims are downsampled by 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]):
        """Check the classification head's logits shape."""
        __UpperCAmelCase : List[Any] = self.num_labels
        __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_)
        __UpperCAmelCase : str = model(UpperCamelCase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def a_ ( self : Optional[Any]):
        """Return (config, inputs_dict) for the common test mixin."""
        __UpperCAmelCase : Any = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
        __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class a__ ( __magic_name__ , unittest.TestCase ):
    lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    lowercase_ = False
    lowercase_ = False
    lowercase_ = False

    def a_ ( self : Tuple):
        __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self)
        __UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_)

    def a_ ( self : Dict):
        """Run the standard config serialization round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a_ ( self : Tuple):
        # intentionally a no-op (placeholder for common-properties check)
        return

    def a_ ( self : Optional[Any]):
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_)

    def a_ ( self : Union[str, Any]):
        __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def a_ ( self : Union[str, Any]):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def a_ ( self : Optional[int]):
        pass

    def a_ ( self : str):
        """Verify the model's forward signature starts with pixel_values."""
        __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCAmelCase : int = model_class(UpperCamelCase_)
            __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase : Any = [*signature.parameters.keys()]

            __UpperCAmelCase : Dict = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase_)

    def a_ ( self : int):
        """Hidden states: one entry per stage plus the stem output."""
        def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]):
            __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_)
            __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))

            __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            __UpperCAmelCase : str = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1)

        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCAmelCase : List[str] = True
            check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : Optional[int] = True
            check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)

    def a_ ( self : Tuple):
        """JIT-compiled and eager forward passes must agree in output shapes."""
        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)
                __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_)

                @jax.jit
                def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]):
                    return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_)

                with self.subTest("JIT Enabled"):
                    __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        __UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple()

                self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_))
                for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_):
                    self.assertEqual(jitted_output.shape , output.shape)


def _UpperCamelCase ( ) -> Any:
    """Load the repo's standard COCO sample image used by integration tests."""
    __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_flax
class a__ ( unittest.TestCase ):
    @cached_property
    def a_ ( self : Optional[int]):
        # Lazily build the image processor only when vision deps are present.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def a_ ( self : int):
        """Integration check: pretrained model reproduces reference logits."""
        __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        __UpperCAmelCase : Dict = self.default_image_processor
        __UpperCAmelCase : str = prepare_img()
        __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np")

        __UpperCAmelCase : Dict = model(**UpperCamelCase_)

        # verify the logits
        __UpperCAmelCase : Dict = (1, 1000)
        self.assertEqual(outputs.logits.shape , UpperCamelCase_)

        __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
1
"""Convert original TrOCR (fairseq) checkpoints into the HuggingFace
VisionEncoderDecoder format, verify sample logits, and save the result.

NOTE(review): identifiers are obfuscation-damaged (`_UpperCamelCase`,
`__UpperCAmelCase`); function bodies reference the intended original names
(e.g. `encoder_config`, `state_dict`), which these assignments do not
actually bind — confirm against the upstream source before running.
"""
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
A = logging.get_logger(__name__)


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
    """Build the (old_key, new_key) rename table for the ViT encoder weights."""
    __UpperCAmelCase : int = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ] )

    return rename_keys


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int:
    """Split each fused qkv projection into separate query/key/value slices."""
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        __UpperCAmelCase : Optional[Any] = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )

        # first hidden_size rows -> query
        __UpperCAmelCase : List[Any] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        # middle hidden_size rows -> key
        __UpperCAmelCase : List[str] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        # last hidden_size rows -> value
        __UpperCAmelCase : List[Any] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
    """Rename a single state-dict key in place (pop old, rebind value)."""
    __UpperCAmelCase : str = dct.pop(UpperCamelCase )
    __UpperCAmelCase : List[Any] = val


def _UpperCamelCase ( UpperCamelCase ) -> str:
    """Download a sample image matching the checkpoint's domain
    (handwritten vs. printed/stage1) and return it as an RGB PIL image."""
    if "handwritten" in checkpoint_url:
        __UpperCAmelCase : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        __UpperCAmelCase : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    __UpperCAmelCase : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("RGB" )
    return im


@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
    """Copy the original checkpoint's weights into a fresh
    VisionEncoderDecoderModel, verify its logits, and save model+processor."""
    __UpperCAmelCase : List[str] = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase )
    __UpperCAmelCase : Any = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        __UpperCAmelCase : Optional[int] = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        __UpperCAmelCase : Any = 1024
        __UpperCAmelCase : int = 4096
        __UpperCAmelCase : Tuple = 24
        __UpperCAmelCase : Any = 16
        __UpperCAmelCase : Union[str, Any] = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        __UpperCAmelCase : List[str] = False
        __UpperCAmelCase : Any = "relu"
        __UpperCAmelCase : Any = 1024
        __UpperCAmelCase : str = True
        __UpperCAmelCase : Any = False
        __UpperCAmelCase : Optional[int] = False

    # load HuggingFace model
    __UpperCAmelCase : Optional[Any] = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase )
    __UpperCAmelCase : str = TrOCRForCausalLM(UpperCamelCase )
    __UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
    model.eval()

    # load state_dict of original model, rename some keys
    __UpperCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , check_hash=UpperCamelCase )["model"]

    __UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase )
    for src, dest in rename_keys:
        rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    read_in_q_k_v(UpperCamelCase , UpperCamelCase )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        __UpperCAmelCase : Optional[int] = state_dict.pop(UpperCamelCase )
        if key.startswith("decoder" ) and "output_projection" not in key:
            __UpperCAmelCase : Any = val
        else:
            __UpperCAmelCase : int = val

    # load state dict
    model.load_state_dict(UpperCamelCase )

    # Check outputs on an image
    __UpperCAmelCase : List[str] = ViTImageProcessor(size=encoder_config.image_size )
    __UpperCAmelCase : Union[str, Any] = RobertaTokenizer.from_pretrained("roberta-large" )
    __UpperCAmelCase : Dict = TrOCRProcessor(UpperCamelCase , UpperCamelCase )

    __UpperCAmelCase : Optional[Any] = processor(images=prepare_img(UpperCamelCase ) , return_tensors="pt" ).pixel_values

    # verify logits
    __UpperCAmelCase : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    __UpperCAmelCase : Optional[Any] = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase )
    __UpperCAmelCase : List[Any] = outputs.logits

    # reference slices of the first 10 logits, per released checkpoint
    __UpperCAmelCase : List[str] = torch.Size([1, 1, 5_0265] )
    if "trocr-base-handwritten" in checkpoint_url:
        __UpperCAmelCase : str = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        __UpperCAmelCase : int = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        __UpperCAmelCase : str = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        __UpperCAmelCase : Union[str, Any] = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected"

    Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(UpperCamelCase )
    print(f"Saving processor to {pytorch_dump_folder_path}" )
    processor.save_pretrained(UpperCamelCase )


if __name__ == "__main__":
    A = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    A = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
77
"""Spearman rank-order correlation metric, wrapping scipy.stats.spearmanr."""
from scipy.stats import spearmanr

import datasets


# NOTE(review): the three string constants below are all bound to the same
# name `A` (obfuscation damage) — each rebinding shadows the previous one;
# upstream they are _DESCRIPTION, _KWARGS_DESCRIPTION and _CITATION.
A = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

A = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
                            only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

A = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    def a_ ( self : Any):
        """Declare metric metadata: float predictions/references, scipy docs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )

    def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False):
        """Compute Spearman's rho (and optionally its p-value) via scipy."""
        __UpperCAmelCase : List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
77
1
"""Greatest common divisor and modular multiplicative inverse."""


def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, via the extended Euclidean
    algorithm.

    Raises:
        ValueError: if the inverse does not exist, i.e. gcd(a, m) != 1.
    """
    if gcd(a, m) != 1:
        # No inverse exists when a and m share a common factor.
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    # Extended Euclid: maintain u = (u1, u2, u3) and v = (v1, v2, v3) with
    # the invariants u3 = u1*a + u2*m and v3 = v1*a + v2*m.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, (u1 - q * v1), (u2 - q * v2), (u3 - q * v3)
    # u3 == gcd(a, m) == 1 here, so u1 is the inverse (normalized into [0, m)).
    return u1 % m
77
"""SentencePiece tokenizer for the bert_for_seq_generation checkpoints."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


A = logging.get_logger(__name__)

# NOTE(review): the three constants below are all bound to the same name `A`
# (obfuscation damage); each rebinding shadows the previous one. Upstream
# they are VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
A = {"""vocab_file""": """spiece.model"""}

A = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

A = {"""bert_for_seq_generation""": 512}


class a__ ( __magic_name__ ):
    lowercase_ = VOCAB_FILES_NAMES
    lowercase_ = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ = []
    lowercase_ = ["input_ids", "attention_mask"]

    def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ):
        """Load the SentencePiece model from `vocab_file`.

        NOTE(review): positionally the obfuscated parameters are vocab_file,
        bos_token, eos_token, unk_token, pad_token, sep_token,
        sp_model_kwargs.
        """
        __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )

        __UpperCAmelCase : Dict = vocab_file

        __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(UpperCamelCase_)

    @property
    def a_ ( self : List[str]):
        """Vocabulary size as reported by the SentencePiece model."""
        return self.sp_model.get_piece_size()

    def a_ ( self : Union[str, Any]):
        """Return the full token->id mapping, including user-added tokens."""
        __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__( self : int):
        # SentencePieceProcessor is not picklable: drop it for pickling and
        # reload it from vocab_file in __setstate__.
        __UpperCAmelCase : Optional[int] = self.__dict__.copy()
        __UpperCAmelCase : List[Any] = None
        return state

    def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
        __UpperCAmelCase : Optional[Any] = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            __UpperCAmelCase : List[Any] = {}

        __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def a_ ( self : Any , UpperCamelCase_ : str):
        """Tokenize text into SentencePiece subword strings."""
        return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_)

    def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]):
        """Convert a token (str) to its id using the SentencePiece model."""
        return self.sp_model.piece_to_id(UpperCamelCase_)

    def a_ ( self : Tuple , UpperCamelCase_ : int):
        """Convert an id back to its token string."""
        __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_)
        return token

    def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]):
        """Join subword tokens back into a plain string."""
        __UpperCAmelCase : int = []
        __UpperCAmelCase : Tuple = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(UpperCamelCase_) + token
                __UpperCAmelCase : List[Any] = []
            else:
                current_sub_tokens.append(UpperCamelCase_)
        out_string += self.sp_model.decode(UpperCamelCase_)
        return out_string.strip()

    def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
        """Copy (or re-serialize) the SentencePiece model into save_directory."""
        if not os.path.isdir(UpperCamelCase_):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        __UpperCAmelCase : Tuple = os.path.join(
            UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , UpperCamelCase_)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone: write the serialized model instead.
            with open(UpperCamelCase_ , "wb") as fi:
                __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase_)

        return (out_vocab_file,)
77
1
"""Smoke tests: run the bundled accelerate test scripts on CPU via the debug launcher."""
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    # NOTE: the obfuscated source named both methods identically, so the second
    # definition shadowed the first and only one test would ever run; they are
    # restored as two distinct tests.
    def test_cpu(self):
        """Launch the end-to-end test script through the debug launcher."""
        debug_launcher(test_script.main)

    def test_ops(self):
        """Launch the distributed-ops test script through the debug launcher."""
        debug_launcher(test_ops.main)
77
"""End-to-end checks for `Accelerator.gather_for_metrics` and metric computation.

Verifies that metrics computed through distributed gathering match a
single-process baseline, and that the number of gathered samples equals the
dataset length (duplicates from uneven batches are dropped).
"""
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed

# NOTE(review): the original bound this to an environment-style flag whose name
# was lost in obfuscation; kept as a named constant — confirm against upstream.
TRUE = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return ``(model, ddp_model, dataloader)`` for a simple regression problem.

    ``model`` stays un-prepared (the baseline); ``ddp_model`` and ``dataloader``
    are passed through ``accelerator.prepare``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dataset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dataset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader.

    When ``use_longest`` is True, batches are padded to the longest sequence in
    the batch; otherwise to a fixed length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # assumes truncation at the model default max length — TODO confirm vs upstream
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Build the MRPC model/dataloader pair in both baseline and prepared form."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run ``model`` over ``dataloader`` and return gathered ``(logits, targets)``."""
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        # gather_for_metrics drops duplicate samples added to even out batches
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gathering returns exactly ``num_samples`` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches=False, split_batches=False):
    """Check that distributed metric values match the single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    """Drive the metric tests across split/dispatch batch configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    """Entry point for xla_spawn (TPUs)."""
    main()


if __name__ == "__main__":
    main()
77
1
"""Train a small LSTM on a univariate series from a CSV and predict forward windows."""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # scale to [0, 1] so the LSTM trains on normalized values
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # overlap the split by look_back so the first test window has full history
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # sliding windows: look_back inputs -> forward_days targets
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
77
"""Introsort: quicksort that falls back to heapsort at depth limit and to
insertion sort for small ranges."""
import math


def insertion_sort(array, start=0, end=0):
    """Sort ``array[start:end]`` in place with insertion sort and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # shift larger elements right until the insertion point is found
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift ``array[index]`` down to restore the max-heap property within ``heap_size``."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort ``array`` in place with heapsort and return it."""
    n = len(array)
    # build the max heap bottom-up
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # repeatedly move the max to the end and re-heapify the prefix
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed elements (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``; returns split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort ``array`` with introsort and return it.

    Depth limit is 2*ceil(log2(n)); ranges of <= 16 elements use insertion sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort core over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # recursion too deep: quicksort is degrading, switch to heapsort
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
77
1
"""Convert a fairseq/unilm WavLM checkpoint into a Hugging Face ``WavLMModel``."""
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their Hugging Face counterparts;
# "*" is replaced with the encoder layer index at conversion time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` into the attribute of ``hf_pointer`` addressed by dotted ``key``.

    ``weight_type`` selects which tensor (weight/weight_g/weight_v/bias) receives
    the value; ``None`` assigns the pointer's own ``.data``. Shapes are asserted
    to match before assignment.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every fairseq parameter into ``hf_model``, warning about leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the segment before the matched key ends with the layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor parameter into ``feature_extractor``."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the original checkpoint, port its weights, and save a HF model."""
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
77
"""2-D max and average pooling over square matrices with NumPy."""
import numpy as np


def maxpooling(arr, size, stride):
    """Max-pool a square matrix with a ``size``x``size`` window and given ``stride``.

    Raises ValueError if the input is not square. Windows that would run past
    the edge are dropped (no padding).
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr, size, stride):
    """Average-pool a square matrix with a ``size``x``size`` window and given ``stride``.

    Each output cell is the truncated-int average of its window. Raises
    ValueError if the input is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    # PIL is only needed for the demo, so import it lazily here to keep the
    # module importable without Pillow installed.
    from doctest import testmod

    from PIL import Image

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
1
"""Lazy import structure for the Wav2Vec2Phoneme tokenizer submodule."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports only happen on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
77
"""Fast (tokenizers-backed) tokenizer for the Pegasus model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer.

    Reserves ``offset`` special ids: mask_token_sent, pad, eos, plus filler
    ``<unk_x>`` tokens so that real vocabulary ids start at ``offset``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask marking which ids in ``seq`` are special tokens."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by appending the EOS token id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
77
1
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def _UpperCamelCase ( UpperCamelCase = 8 ) -> str: """simple docstring""" __UpperCAmelCase : List[Any] = ascii_letters + digits + punctuation return "".join(secrets.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(UpperCamelCase ) __UpperCAmelCase : int = i // 3 __UpperCAmelCase : Optional[int] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) __UpperCAmelCase : Optional[Any] = ( chars_incl + random(UpperCamelCase , quotient + remainder ) + random(UpperCamelCase , UpperCamelCase ) + random(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : Union[str, Any] = list(UpperCamelCase ) shuffle(UpperCamelCase ) return "".join(UpperCamelCase ) # random is a generalised function for letters, characters and numbers def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" return "".join(secrets.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[Any]: """simple docstring""" pass # Put your code here... def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Any: """simple docstring""" pass # Put your code here... def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" pass # Put your code here... 
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return ``True`` when ``password`` is "strong".

    A strong password is at least ``min_length`` characters long and contains
    at least one uppercase letter, one lowercase letter, one digit and one
    punctuation character.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase, numbers and special characters


def main() -> None:
    """Prompt for a length and required characters, then print two passwords."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
77
"""Convert a TAPAS TensorFlow checkpoint to a PyTorch model + tokenizer.

NOTE(review): the mangled dump erased the left-hand-side config attribute
names; they are reconstructed here from the visible values/ordering and the
upstream Hugging Face conversion script — verify against that script.
"""
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Build the task-specific TAPAS model, load TF weights, and save it.

    Args:
        task: one of "SQA", "WTQ", "WIKISQL_SUPERVISED", "TABFACT", "MLM",
            "INTERMEDIATE_PRETRAINING"; anything else raises ``ValueError``.
        reset_position_index_per_cell: whether to use relative position
            embeddings (set ``False`` for absolute position embeddings).
        tf_checkpoint_path: path to the TF checkpoint; the sibling
            ``vocab.txt`` (checkpoint path minus its 10-char suffix) is used
            for the tokenizer.
        tapas_config_file: JSON config to initialise ``TapasConfig`` from.
        pytorch_dump_path: output directory for model and tokenizer files.
    """
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files; vocab.txt is expected next to the checkpoint.
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(
        vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512
    )
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str,
        help="Model task for which to convert a checkpoint. Defaults to SQA.",
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--tapas_config_file", default=None, type=str, required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
77
1
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class a__ : def __init__( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=13 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=99 , UpperCamelCase_ : List[Any]=32 , UpperCamelCase_ : Dict=5 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : List[str]=37 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[str]=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : List[str]=None , ): """simple docstring""" __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Any = is_training __UpperCAmelCase : Dict = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : int = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : int = hidden_act 
__UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob __UpperCAmelCase : List[str] = max_position_embeddings __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : Optional[Any] = type_sequence_label_size __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Optional[int] = num_labels __UpperCAmelCase : int = num_choices __UpperCAmelCase : Dict = scope def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length]) __UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __UpperCAmelCase : Tuple = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ ( self : int): """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , 
initializer_range=self.initializer_range , ) def a_ ( self : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : Tuple = BioGptModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_) __UpperCAmelCase : Tuple = model(UpperCamelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def a_ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , ): """simple docstring""" __UpperCAmelCase : Optional[int] = BioGptForCausalLM(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def a_ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , *UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : Union[str, Any] = BioGptModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() # create attention mask __UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_) __UpperCAmelCase : List[Any] = self.seq_length // 2 __UpperCAmelCase : Any = 0 # first forward pass __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_).to_tuple() # create hypothetical next token and extent to next_input_ids 
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size) # change a random masked slice from input_ids __UpperCAmelCase : Optional[Any] = ids_tensor((1,) , UpperCamelCase_).item() + 1 __UpperCAmelCase : Dict = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1) __UpperCAmelCase : str = random_other_next_tokens # append to next input_ids and attn_mask __UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1) __UpperCAmelCase : Optional[int] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase_)] , dim=1 , ) # get two different outputs __UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"] __UpperCAmelCase : Any = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item() __UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() __UpperCAmelCase : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3)) def a_ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Any , *UpperCamelCase_ : List[Any]): """simple docstring""" __UpperCAmelCase : str = BioGptModel(config=UpperCamelCase_).to(UpperCamelCase_).eval() __UpperCAmelCase : Any = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_) # first forward pass __UpperCAmelCase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_) __UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[str] = 
ids_tensor((self.batch_size, 3) , config.vocab_size) __UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2) # append to next input_ids and __UpperCAmelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1) __UpperCAmelCase : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1) __UpperCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_)["last_hidden_state"] __UpperCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_)[ "last_hidden_state" ] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item() __UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3)) def a_ ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , *UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False): """simple docstring""" __UpperCAmelCase : Dict = BioGptForCausalLM(UpperCamelCase_) model.to(UpperCamelCase_) if gradient_checkpointing: model.gradient_checkpointing_enable() __UpperCAmelCase : List[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def a_ ( self : str , UpperCamelCase_ : int , *UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : List[str] = BioGptModel(UpperCamelCase_) __UpperCAmelCase : List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in 
model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Any = BioGptForTokenClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class a__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase_ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowercase_ = (BioGptForCausalLM,) if is_torch_available() else () lowercase_ = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False def a_ ( self : str): """simple docstring""" __UpperCAmelCase : Optional[int] = BioGptModelTester(self) 
__UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37) def a_ ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : Union[str, Any] = type self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase_) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase_ , gradient_checkpointing=UpperCamelCase_) def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase_) @slow def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : int = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = 
BioGptTokenizer.from_pretrained("microsoft/biogpt") __UpperCAmelCase : Tuple = "left" # Define PAD Token = EOS Token = 50256 __UpperCAmelCase : Tuple = tokenizer.eos_token __UpperCAmelCase : Union[str, Any] = model.config.eos_token_id # use different length sentences to test batching __UpperCAmelCase : str = [ "Hello, my dog is a little", "Today, I", ] __UpperCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors="pt" , padding=UpperCamelCase_) __UpperCAmelCase : List[Any] = inputs["input_ids"].to(UpperCamelCase_) __UpperCAmelCase : List[Any] = model.generate( input_ids=UpperCamelCase_ , attention_mask=inputs["attention_mask"].to(UpperCamelCase_) , ) __UpperCAmelCase : Dict = tokenizer(sentences[0] , return_tensors="pt").input_ids.to(UpperCamelCase_) __UpperCAmelCase : Tuple = model.generate(input_ids=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __UpperCAmelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors="pt").input_ids.to(UpperCamelCase_) __UpperCAmelCase : str = model.generate(input_ids=UpperCamelCase_ , max_length=model.config.max_length - num_paddings) __UpperCAmelCase : Any = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_) self.assertListEqual(UpperCamelCase_ , [non_padded_sentence, padded_sentence]) @slow def a_ ( self : List[Any]): """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : str = BioGptModel.from_pretrained(UpperCamelCase_) 
self.assertIsNotNone(UpperCamelCase_) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Union[str, Any] = input_dict["input_ids"] __UpperCAmelCase : Any = input_ids.ne(1).to(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __UpperCAmelCase : Dict = BioGptForSequenceClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : int = 3 __UpperCAmelCase : List[Any] = "multi_label_classification" __UpperCAmelCase : str = input_dict["input_ids"] __UpperCAmelCase : Any = input_ids.ne(1).to(UpperCamelCase_) __UpperCAmelCase : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __UpperCAmelCase : List[Any] = BioGptForSequenceClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @require_torch class a__ ( unittest.TestCase ): @slow def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt") __UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]]) __UpperCAmelCase : Any = model(UpperCamelCase_)[0] __UpperCAmelCase : Any = 42384 __UpperCAmelCase : str = 
torch.Size((1, 5, vocab_size)) self.assertEqual(output.shape , UpperCamelCase_) __UpperCAmelCase : List[Any] = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1e-4)) @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = BioGptTokenizer.from_pretrained("microsoft/biogpt") __UpperCAmelCase : Union[str, Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt") model.to(UpperCamelCase_) torch.manual_seed(0) __UpperCAmelCase : Any = tokenizer("COVID-19 is" , return_tensors="pt").to(UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = model.generate( **UpperCamelCase_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=UpperCamelCase_ , ) __UpperCAmelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : Optional[int] = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
77
"""Cast a saved PyTorch state dict to fp16, in place or to a new path."""
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load the state dict at ``src_path``, halve every tensor, and save.

    Args:
        src_path: path to a saved state dict (e.g. ``pytorch_model.bin``).
        map_location: device to map tensors to while loading (default "cpu").
        save_path: output path; when ``None`` the source file is overwritten.

    Raises:
        TypeError: if any value in the loaded object is not a ``torch.Tensor``
            (i.e. the file is not a plain state dict).
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
77
1
"""Unidirectional and bidirectional breadth-first search on a small grid.

The mangled dump named all three classes ``a__`` while ``__main__`` calls
``BreadthFirstSearch`` / ``BidirectionalBreadthFirstSearch`` (NameError);
the class names the script itself references are restored.
"""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """A grid cell plus the goal coordinates and a back-pointer for path retracing."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # stored (row, col) to match the grid indexing
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain BFS from ``start`` to ``goal`` over the module-level ``grid``."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Coordinates arrive as (row, col); Node takes (col, row).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the path to the target, or ``[start]`` if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            for node in self.get_successors(current_node):
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back to the start and return the forward path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers expanded in lockstep, one from each endpoint."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Expand both frontiers until they meet; return the combined path."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # Retarget each search at the other frontier's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Glue the forward path and the reversed backward path at the meeting cell."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting cell
        bwd_path.reverse()
        return fwd_path + bwd_path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
77
"""Train a small stacked-LSTM forecaster on a univariate series from sample_data.csv.

The mangled dump assigned every module variable to ``A`` (each assignment
shadowing the last) while expressions reference ``len_data``, ``actual_data``
etc.; the distinct names the code itself references are restored.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # Scale to [0, 1] so the LSTM trains on normalised values.
    actual_data = MinMaxScaler().fit_transform(actual_data)

    look_back = 10   # window length fed to the network
    forward_days = 5  # horizon predicted per window
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Overlap by look_back so the first test window has full history.
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []

    # Slide a (look_back -> forward_days) window over each split.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    predictions = model.predict(x_test)
77
1
"""Concatenate a list of strings with a separator."""


def join(separator: str, separated: list[str]) -> str:
    """Join ``separated`` with ``separator``, validating every element is a str.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'

    Raises:
        Exception: if any element of ``separated`` is not a string.

    NOTE(review): the trailing ``strip(separator)`` strips separator
    characters from *both* ends, so a word starting/ending with a separator
    character is trimmed too — behaviour kept as in the original.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
77
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value 
in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution 
in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) 
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , 
self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Dict , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict): """simple 
docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : int , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Any , *UpperCamelCase_ : int , **UpperCamelCase_ : str): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : 
Tuple , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> Optional[int]: """simple docstring""" 
requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> List[Any]: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> Dict: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> List[Any]: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) def _UpperCamelCase ( *UpperCamelCase , **UpperCamelCase ) -> List[Any]: """simple docstring""" requires_backends(UpperCamelCase , ["torch"] ) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( 
metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Dict , 
*UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : int): """simple 
docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" 
requires_backends(self , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : str , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : str , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : str , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : int , *UpperCamelCase_ : Any , **UpperCamelCase_ : str): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : str , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : int , *UpperCamelCase_ : 
Union[str, Any] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Any , *UpperCamelCase_ : Any , **UpperCamelCase_ : int): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Any , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) class 
a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Dict , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Any , *UpperCamelCase_ : Any , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : List[str] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Dict , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Dict , 
*UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Any , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Any , **UpperCamelCase_ : 
Any): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : int , *UpperCamelCase_ : Any , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Any): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : str , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : str , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Tuple , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Any): """simple docstring""" 
requires_backends(self , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : List[str] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Optional[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : Any , **UpperCamelCase_ : int): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : List[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Tuple , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Union[str, Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(cls , 
["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[int] , *UpperCamelCase_ : int , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : str , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Optional[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Dict): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : int): """simple docstring""" requires_backends(cls , ["torch"]) class a__ ( metaclass=__magic_name__ ): lowercase_ = ["torch"] def __init__( self : Tuple , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[int]): """simple docstring""" requires_backends(self , ["torch"]) @classmethod def a_ ( cls : Dict , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[str]): """simple docstring""" requires_backends(cls , ["torch"]) @classmethod def a_ ( cls : Union[str, Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : str): """simple docstring""" requires_backends(cls , ["torch"])
77
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
1
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] 
, UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if 
token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
1
"""simple docstring""" import pytest A = """__dummy_dataset1__""" A = """ import json import os import datasets REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\" URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { \"tokens\": datasets.Sequence(datasets.Value(\"string\")), \"ner_tags\": datasets.Sequence( datasets.features.ClassLabel( names=[ \"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", ] ) ), \"langs\": datasets.Sequence(datasets.Value(\"string\")), \"spans\": datasets.Sequence(datasets.Value(\"string\")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}), ] def _generate_examples(self, filepath): with open(filepath, \"r\", encoding=\"utf-8\") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def _UpperCamelCase ( ) -> Any: """simple docstring""" return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def _UpperCamelCase ( ) -> Union[str, Any]: """simple docstring""" return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: """simple docstring""" __UpperCAmelCase : int = dataset_loading_script_name __UpperCAmelCase : Union[str, Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=UpperCamelCase ) __UpperCAmelCase : List[str] = script_dir / f"{script_name}.py" with open(UpperCamelCase , "w" ) as f: f.write(UpperCamelCase ) return str(UpperCamelCase )
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A = logging.get_logger(__name__) A = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class a__ ( __magic_name__ , __magic_name__ ): lowercase_ = "convnextv2" def __init__( self : Any , UpperCamelCase_ : str=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : int=1e-12 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Dict=224 , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Tuple , ): """simple docstring""" super().__init__(**UpperCamelCase_) __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Any = patch_size __UpperCAmelCase : Dict = num_stages __UpperCAmelCase : Any = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes __UpperCAmelCase : List[Any] = [3, 3, 9, 3] if depths is None else depths __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : List[str] = drop_path_rate __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : Optional[Any] = ["stem"] + [F"stage{idx}" for idx in range(1 , len(self.depths) + 1)] __UpperCAmelCase , __UpperCAmelCase : Optional[int] = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names)
77
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) 
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed __UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : 
Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
1
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
1
"""simple docstring""" A = """Alexander Joslin""" import operator as op from .stack import Stack def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[int] = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} __UpperCAmelCase : Stack[int] = Stack() __UpperCAmelCase : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(UpperCamelCase ) ) elif i in operators: # RULE 2 operator_stack.push(UpperCamelCase ) elif i == ")": # RULE 4 __UpperCAmelCase : Dict = operator_stack.peek() operator_stack.pop() __UpperCAmelCase : List[str] = operand_stack.peek() operand_stack.pop() __UpperCAmelCase : List[Any] = operand_stack.peek() operand_stack.pop() __UpperCAmelCase : Any = operators[opr](UpperCamelCase , UpperCamelCase ) operand_stack.push(UpperCamelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": A = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
1
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A = logging.get_logger(__name__) A = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class a__ ( __magic_name__ ): lowercase_ = "bart" lowercase_ = ["past_key_values"] lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , UpperCamelCase_ : Optional[Any]=50265 , UpperCamelCase_ : int=1024 , UpperCamelCase_ : List[str]=12 , UpperCamelCase_ : List[str]=4096 , UpperCamelCase_ : int=16 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Any=4096 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1024 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : Any=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Optional[int]=2 , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Optional[int] = max_position_embeddings __UpperCAmelCase : List[str] = d_model __UpperCAmelCase : List[str] = encoder_ffn_dim __UpperCAmelCase : List[str] = encoder_layers __UpperCAmelCase : Optional[int] = encoder_attention_heads 
__UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : List[Any] = decoder_layers __UpperCAmelCase : Dict = decoder_attention_heads __UpperCAmelCase : int = dropout __UpperCAmelCase : str = attention_dropout __UpperCAmelCase : Dict = activation_dropout __UpperCAmelCase : str = activation_function __UpperCAmelCase : Union[str, Any] = init_std __UpperCAmelCase : Optional[int] = encoder_layerdrop __UpperCAmelCase : List[str] = decoder_layerdrop __UpperCAmelCase : Dict = classifier_dropout __UpperCAmelCase : Any = use_cache __UpperCAmelCase : Optional[int] = encoder_layers __UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCamelCase_): __UpperCAmelCase : Dict = self.bos_token_id warnings.warn( F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
" "The config can simply be saved and uploaded again to be fixed.") class a__ ( __magic_name__ ): @property def a_ ( self : int): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ]) if self.use_past: __UpperCAmelCase : Dict = {0: "batch"} __UpperCAmelCase : str = {0: "batch", 1: "past_decoder_sequence + sequence"} else: __UpperCAmelCase : Optional[Any] = {0: "batch", 1: "decoder_sequence"} __UpperCAmelCase : Optional[int] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase_ , direction="inputs") elif self.task == "causal-lm": # TODO: figure this case out. __UpperCAmelCase : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ]) if self.use_past: __UpperCAmelCase , __UpperCAmelCase : int = self.num_layers for i in range(UpperCamelCase_): __UpperCAmelCase : int = {0: "batch", 2: "past_sequence + sequence"} __UpperCAmelCase : int = {0: "batch", 2: "past_sequence + sequence"} else: __UpperCAmelCase : Union[str, Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ]) return common_inputs @property def a_ ( self : Any): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Optional[Any] = super().outputs else: __UpperCAmelCase : Any = super(UpperCamelCase_ , self).outputs if self.use_past: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.num_layers for i in range(UpperCamelCase_): __UpperCAmelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} __UpperCAmelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"} return 
common_outputs def a_ ( self : List[str] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ): """simple docstring""" __UpperCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # Generate decoder inputs __UpperCAmelCase : str = seq_length if not self.use_past else 1 __UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __UpperCAmelCase : Optional[Any] = dict(**UpperCamelCase_ , **UpperCamelCase_) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch __UpperCAmelCase , __UpperCAmelCase : Tuple = common_inputs["input_ids"].shape __UpperCAmelCase : str = common_inputs["decoder_input_ids"].shape[1] __UpperCAmelCase , __UpperCAmelCase : Dict = self.num_attention_heads __UpperCAmelCase : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : List[str] = decoder_seq_length + 3 __UpperCAmelCase : Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __UpperCAmelCase : str = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_)] , dim=1) __UpperCAmelCase : str = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.num_layers __UpperCAmelCase : Any 
= min(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : int = max(UpperCamelCase_ , UpperCamelCase_) - min_num_layers __UpperCAmelCase : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(UpperCamelCase_): common_inputs["past_key_values"].append( ( torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_), )) # TODO: test this. __UpperCAmelCase : Tuple = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(UpperCamelCase_ , UpperCamelCase_): common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_))) return common_inputs def a_ ( self : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ): """simple docstring""" __UpperCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch __UpperCAmelCase , __UpperCAmelCase : Tuple = common_inputs["input_ids"].shape # Not using the same length for past_key_values __UpperCAmelCase : Dict = seqlen + 2 __UpperCAmelCase , __UpperCAmelCase : str = self.num_layers __UpperCAmelCase , __UpperCAmelCase : Tuple = self.num_attention_heads __UpperCAmelCase : List[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : List[Any] = common_inputs["attention_mask"].dtype __UpperCAmelCase : Optional[int] = torch.cat( [common_inputs["attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_)] , dim=1) __UpperCAmelCase : Tuple = [ 
(torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_)) for _ in range(UpperCamelCase_) ] return common_inputs def a_ ( self : Tuple , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ): """simple docstring""" __UpperCAmelCase : Optional[int] = compute_effective_axis_dimension( UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __UpperCAmelCase : Dict = tokenizer.num_special_tokens_to_add(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = compute_effective_axis_dimension( UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_) # Generate dummy inputs according to compute batch and sequence __UpperCAmelCase : List[Any] = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size __UpperCAmelCase : Optional[Any] = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_)) return common_inputs def a_ ( self : Optional[Any] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_) elif self.task == "causal-lm": __UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_causal_lm( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_) else: __UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , 
batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_) return common_inputs def a_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int]): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Any = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) else: __UpperCAmelCase : List[str] = super(UpperCamelCase_ , self)._flatten_past_key_values_( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
77
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput A = logging.get_logger(__name__) # pylint: disable=invalid-name def _UpperCamelCase ( UpperCamelCase ) -> Any: """simple docstring""" warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , UpperCamelCase , ) if isinstance(UpperCamelCase , torch.Tensor ): return image elif isinstance(UpperCamelCase , PIL.Image.Image ): __UpperCAmelCase : int = [image] if isinstance(image[0] , PIL.Image.Image ): __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = image[0].size __UpperCAmelCase , __UpperCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 __UpperCAmelCase : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] __UpperCAmelCase : Optional[int] = np.concatenate(UpperCamelCase , axis=0 ) __UpperCAmelCase : int = np.array(UpperCamelCase ).astype(np.floataa ) / 255.0 __UpperCAmelCase : Optional[int] = image.transpose(0 , 3 , 1 , 2 ) __UpperCAmelCase : Tuple = 2.0 * image - 1.0 __UpperCAmelCase : List[str] = torch.from_numpy(UpperCamelCase ) elif isinstance(image[0] , torch.Tensor ): __UpperCAmelCase : Dict = torch.cat(UpperCamelCase , dim=0 ) return image def _UpperCamelCase ( UpperCamelCase ) -> List[Any]: """simple docstring""" if isinstance(UpperCamelCase , torch.Tensor ): return mask elif isinstance(UpperCamelCase , PIL.Image.Image ): __UpperCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): __UpperCAmelCase , __UpperCAmelCase : Dict = mask[0].size __UpperCAmelCase , __UpperCAmelCase : Tuple = (x - x % 32 for x in (w, h)) # resize to integer 
multiple of 32 __UpperCAmelCase : Union[str, Any] = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] __UpperCAmelCase : Optional[Any] = np.concatenate(UpperCamelCase , axis=0 ) __UpperCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 __UpperCAmelCase : int = 0 __UpperCAmelCase : Dict = 1 __UpperCAmelCase : List[str] = torch.from_numpy(UpperCamelCase ) elif isinstance(mask[0] , torch.Tensor ): __UpperCAmelCase : List[str] = torch.cat(UpperCamelCase , dim=0 ) return mask class a__ ( __magic_name__ ): lowercase_ = 42 lowercase_ = 42 def __init__( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str]): """simple docstring""" super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_) @torch.no_grad() def __call__( self : int , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 250 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 10 , UpperCamelCase_ : int = 10 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): """simple docstring""" __UpperCAmelCase : Dict = image __UpperCAmelCase : Any = _preprocess_image(UpperCamelCase_) __UpperCAmelCase : str = original_image.to(device=self.device , dtype=self.unet.dtype) __UpperCAmelCase : List[str] = _preprocess_mask(UpperCamelCase_) __UpperCAmelCase : List[Any] = mask_image.to(device=self.device , dtype=self.unet.dtype) __UpperCAmelCase : Dict = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_) and len(UpperCamelCase_) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(UpperCamelCase_)}, but requested an effective batch" F" size of {batch_size}. 
Make sure the batch size matches the length of the generators.") __UpperCAmelCase : Tuple = original_image.shape __UpperCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device) __UpperCAmelCase : Any = eta __UpperCAmelCase : List[Any] = self.scheduler.timesteps[0] + 1 __UpperCAmelCase : Optional[Any] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): if t < t_last: # predict the noise residual __UpperCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_).sample # compute previous image: x_t -> x_t-1 __UpperCAmelCase : int = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_).prev_sample else: # compute the reverse: x_t-1 -> x_t __UpperCAmelCase : Union[str, Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Tuple = t __UpperCAmelCase : str = (image / 2 + 0.5).clamp(0 , 1) __UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __UpperCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_)
77
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : 
Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
1
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class a__ ( nn.Module ): def __init__( self : Any , UpperCamelCase_ : int = 16 , UpperCamelCase_ : int = 88 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 32 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str = "geglu" , UpperCamelCase_ : Optional[int] = None , ): """simple docstring""" super().__init__() __UpperCAmelCase : str = nn.ModuleList( [ TransformeraDModel( num_attention_heads=UpperCamelCase_ , attention_head_dim=UpperCamelCase_ , in_channels=UpperCamelCase_ , num_layers=UpperCamelCase_ , dropout=UpperCamelCase_ , norm_num_groups=UpperCamelCase_ , cross_attention_dim=UpperCamelCase_ , attention_bias=UpperCamelCase_ , sample_size=UpperCamelCase_ , num_vector_embeds=UpperCamelCase_ , activation_fn=UpperCamelCase_ , num_embeds_ada_norm=UpperCamelCase_ , ) for _ in range(2) ]) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference __UpperCAmelCase : Tuple = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` __UpperCAmelCase : Union[str, Any] = [77, 257] # Which transformer to use to encode which condition. # E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` __UpperCAmelCase : Tuple = [1, 0] def a_ ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : bool = True , ): """simple docstring""" __UpperCAmelCase : Any = hidden_states __UpperCAmelCase : str = [] __UpperCAmelCase : int = 0 # attention_mask is not used yet for i in range(2): # for each of the two transformers, pass the corresponding condition tokens __UpperCAmelCase : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] __UpperCAmelCase : Optional[Any] = self.transformer_index_for_condition[i] __UpperCAmelCase : Dict = self.transformers[transformer_index]( UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ , cross_attention_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0] encoded_states.append(encoded_state - input_states) tokens_start += self.condition_lengths[i] __UpperCAmelCase : Union[str, Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) __UpperCAmelCase : Optional[Any] = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=UpperCamelCase_)
77
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , 
num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] 
= mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
1
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A = logging.get_logger(__name__) A = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a__ : lowercase_ = field( default=__magic_name__ , metadata={"help": "Model type selected in the list: " + ", ".join(__magic_name__ )} ) lowercase_ = field( default=__magic_name__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) lowercase_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowercase_ = field( default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) lowercase_ = field( default=6_4 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) lowercase_ = field( default=3_0 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." 
) } , ) lowercase_ = field( default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) lowercase_ = field( default=__magic_name__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) lowercase_ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowercase_ = field( default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowercase_ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) lowercase_ = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class a__ ( __magic_name__ ): lowercase_ = "train" lowercase_ = "dev" class a__ ( __magic_name__ ): lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 lowercase_ = 42 def __init__( self : Tuple , UpperCamelCase_ : SquadDataTrainingArguments , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Union[str, Split] = Split.train , UpperCamelCase_ : Optional[bool] = False , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = "pt" , ): """simple docstring""" __UpperCAmelCase : Optional[int] = args __UpperCAmelCase : Any = is_language_sensitive __UpperCAmelCase : Optional[Any] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(UpperCamelCase_ , UpperCamelCase_): try: __UpperCAmelCase : Any = Split[mode] except KeyError: raise KeyError("mode is not a valid split name") __UpperCAmelCase : Optional[int] = mode # Load data features from cache or dataset file __UpperCAmelCase : int = "v2" if args.version_2_with_negative else "v1" __UpperCAmelCase : Optional[Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , 
F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __UpperCAmelCase : Optional[int] = cached_features_file + ".lock" with FileLock(UpperCamelCase_): if os.path.exists(UpperCamelCase_) and not args.overwrite_cache: __UpperCAmelCase : Union[str, Any] = time.time() __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase_) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __UpperCAmelCase : Optional[Any] = self.old_features["features"] __UpperCAmelCase : int = self.old_features.get("dataset" , UpperCamelCase_) __UpperCAmelCase : Tuple = self.old_features.get("examples" , UpperCamelCase_) logger.info( F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start) if self.dataset is None or self.examples is None: logger.warning( F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run") else: if mode == Split.dev: __UpperCAmelCase : Union[str, Any] = self.processor.get_dev_examples(args.data_dir) else: __UpperCAmelCase : List[str] = self.processor.get_train_examples(args.data_dir) __UpperCAmelCase , __UpperCAmelCase : int = squad_convert_examples_to_features( examples=self.examples , tokenizer=UpperCamelCase_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , UpperCamelCase_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]") def __len__( self : Union[str, Any]): """simple docstring""" return len(self.features) def __getitem__( self : int , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[Any] = self.features[i] __UpperCAmelCase : List[Any] = torch.tensor(feature.input_ids , dtype=torch.long) __UpperCAmelCase : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long) __UpperCAmelCase : int = torch.tensor(feature.token_type_ids , dtype=torch.long) __UpperCAmelCase : str = torch.tensor(feature.cls_index , dtype=torch.long) __UpperCAmelCase : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float) __UpperCAmelCase : List[Any] = torch.tensor(feature.is_impossible , dtype=torch.float) __UpperCAmelCase : str = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask}) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible}) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa) * self.args.lang_id)}) if self.mode == Split.train: __UpperCAmelCase : Tuple = torch.tensor(feature.start_position , dtype=torch.long) __UpperCAmelCase : List[Any] = torch.tensor(feature.end_position , dtype=torch.long) inputs.update({"start_positions": start_positions, "end_positions": end_positions}) return inputs
77
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] 
, UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if 
token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
1
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel A = HfApi() A = {} # fmt: off A = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) A = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) A = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) A = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) A = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) A = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) A = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, 
-0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) A = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) A = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) A = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) A = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) A = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) A = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) A = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 
3.7093, 3.2343 ]) A = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on A = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": A = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(f'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): A = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: A = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) A = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) A = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): A = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(f'''{mod.modelId} has passed successfully!!!''')
77
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): __UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor 
__UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A = logging.get_logger(__name__) class a__ ( __magic_name__ , __magic_name__ ): lowercase_ = "maskformer-swin" lowercase_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , UpperCamelCase_ : Dict=224 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Any=96 , UpperCamelCase_ : Dict=[2, 2, 6, 2] , UpperCamelCase_ : Tuple=[3, 6, 12, 24] , UpperCamelCase_ : int=7 , UpperCamelCase_ : List[str]=4.0 , UpperCamelCase_ : int=True , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : int=None , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Dict , ): """simple docstring""" super().__init__(**UpperCamelCase_) __UpperCAmelCase : int = image_size __UpperCAmelCase : Optional[int] = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Dict = embed_dim __UpperCAmelCase : List[Any] = depths __UpperCAmelCase : Tuple = len(UpperCamelCase_) __UpperCAmelCase : List[Any] = num_heads __UpperCAmelCase : List[str] = window_size __UpperCAmelCase : Optional[int] = mlp_ratio __UpperCAmelCase : int = qkv_bias __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : Any = drop_path_rate __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : Optional[int] = use_absolute_embeddings __UpperCAmelCase : Optional[Any] = layer_norm_eps __UpperCAmelCase : List[str] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension 
after the last stage of the model __UpperCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCamelCase_) - 1)) __UpperCAmelCase : List[str] = ["stem"] + [F"stage{idx}" for idx in range(1 , len(UpperCamelCase_) + 1)] __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names)
77
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... 
return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = 
spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: A = None A = logging.get_logger(__name__) A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } A = { """facebook/nllb-large-en-ro""": 1_024, """facebook/nllb-200-distilled-600M""": 1_024, } # fmt: off A = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", 
"""guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", 
"""umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = ["input_ids", "attention_mask"] lowercase_ = NllbTokenizer lowercase_ = [] lowercase_ = [] def __init__( self : str , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : int="</s>" , UpperCamelCase_ : Union[str, Any]="</s>" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : int="<mask>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Tuple=False , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else mask_token __UpperCAmelCase : Tuple = legacy_behaviour super().__init__( vocab_file=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = vocab_file __UpperCAmelCase : Union[str, Any] = False if not self.vocab_file else True __UpperCAmelCase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) __UpperCAmelCase : Any = { lang_code: self.convert_tokens_to_ids(UpperCamelCase_) for lang_code in FAIRSEQ_LANGUAGE_CODES } __UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else "eng_Latn" __UpperCAmelCase : List[str] = self.convert_tokens_to_ids(self._src_lang) __UpperCAmelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def a_ ( self : str): """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def a_ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : List[Any] = [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] , UpperCamelCase_ : Optional[str] , **UpperCamelCase_ : List[Any]): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") __UpperCAmelCase : Any = src_lang __UpperCAmelCase : Optional[Any] = self(UpperCamelCase_ , 
add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(UpperCamelCase_) __UpperCAmelCase : str = tgt_lang_id return inputs def a_ ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str = "eng_Latn" , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "fra_Latn" , **UpperCamelCase_ : str , ): """simple docstring""" __UpperCAmelCase : List[Any] = src_lang __UpperCAmelCase : Dict = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : Any): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang) def a_ ( self : List[str]): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang) def a_ ( self : str , UpperCamelCase_ : Dict): """simple docstring""" __UpperCAmelCase : List[str] = self.convert_tokens_to_ids(UpperCamelCase_) if self.legacy_behaviour: __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase : str = [self.cur_lang_code] __UpperCAmelCase : int = [self.eos_token_id] __UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens) __UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens) __UpperCAmelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(UpperCamelCase_) if self.legacy_behaviour: __UpperCAmelCase : str = [] __UpperCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] else: __UpperCAmelCase : List[str] = [self.cur_lang_code] __UpperCAmelCase : Any = 
[self.eos_token_id] __UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens) __UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens) __UpperCAmelCase : Dict = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def a_ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return __UpperCAmelCase : Union[str, Any] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = 
self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] 
= self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input A = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def _UpperCamelCase ( ) -> int: """simple docstring""" __UpperCAmelCase : int = _ask_options( "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: __UpperCAmelCase : str = get_sagemaker_input() else: __UpperCAmelCase : List[Any] = get_cluster_input() return config def _UpperCamelCase ( UpperCamelCase=None ) -> Tuple: """simple docstring""" if subparsers is not None: __UpperCAmelCase : Tuple = subparsers.add_parser("config" , description=UpperCamelCase ) else: __UpperCAmelCase : int = argparse.ArgumentParser("Accelerate config command" , description=UpperCamelCase ) parser.add_argument( "--config_file" , default=UpperCamelCase , help=( "The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase ) return parser def _UpperCamelCase ( UpperCamelCase ) -> Dict: """simple docstring""" __UpperCAmelCase : int = get_user_input() if args.config_file is not None: __UpperCAmelCase : Tuple = args.config_file else: if not os.path.isdir(UpperCamelCase ): os.makedirs(UpperCamelCase ) __UpperCAmelCase : Optional[int] = default_yaml_config_file if config_file.endswith(".json" ): config.to_json_file(UpperCamelCase ) else: config.to_yaml_file(UpperCamelCase ) print(f"accelerate configuration saved at {config_file}" ) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[Any] = config_command_parser() __UpperCAmelCase : Union[str, Any] = parser.parse_args() config_command(UpperCamelCase ) if __name__ == "__main__": main()
77
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return 
# NOTE(review): machine-obfuscated chunk of an `accelerate` metrics test script
# (tests gather_for_metrics against single-process baselines). Every assignment
# target was rewritten to the placeholder `__UpperCAmelCase` and every argument
# to `UpperCamelCase`, so the real names (model, dataloader, accelerator, ...)
# are lost and the text does not run as-is; line breaks also fall mid-statement.
# The first definition is truncated at the chunk boundary (its `DataLoader(...)`
# return starts mid-expression), so the code is kept byte-identical below
# rather than reconstructed.
DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: 
{num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or 
TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
1
"""Lazy import structure for the Time Series Transformer model.

NOTE(review): in the obfuscated original every module-level assignment target
had been rewritten to the placeholder name ``A``, which left
``_import_structure`` undefined at the ``_LazyModule(...)`` call and made the
three assignments shadow one another.  The canonical names are restored here;
the exported symbols themselves are unchanged.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# The configuration is importable unconditionally.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling files require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
77
"""Introsort (introspective sort).

Quicksort with a median-of-three pivot that falls back to heapsort once the
recursion depth budget is exhausted, and to insertion sort on partitions no
larger than ``size_threshold``.

NOTE(review): the obfuscated original bound every value to the placeholder
``__UpperCAmelCase`` while the bodies read the real names (``temp_index``,
``largest``, ``i``, ``j``, ...), so it referenced undefined variables and all
five helpers shared the name ``_UpperCamelCase``.  The working names are
restored here; call sites inside the file (``heapify``, ``partition``,
``intro_sort``, ``sort`` ...) were still visible and fix the naming.
"""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort; return ``array``.

    ``end == 0`` means "to the end of the list".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the insertion point.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:
    """Sift ``array[index]`` down to restore the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """Sort ``array`` in place with heapsort; return ``array``."""
    n = len(array)

    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    # Repeatedly move the max to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three sampled elements (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split point: elements left of it are <= pivot-side values.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Sort ``array`` in place with introsort and return it."""
    if len(array) == 0:
        return array
    # Depth budget: 2 * ceil(log2(n)), the classic introsort bound.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort over ``array[start:end]``; returns ``array``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Depth budget exhausted: avoid quicksort's worst case.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
77
1
"""Horizontal/vertical flip augmentation for YOLO-format datasets.

Reads images plus their YOLO label files, flips each image, mirrors the box
centres accordingly, and writes the augmented pairs with a random suffix.

NOTE(review): the obfuscated original bound every value to the placeholder
``__UpperCAmelCase`` (and imported OpenCV as ``cva``), so it read undefined
names (``letter_code``, ``file_root``, ``boxes`` ...).  Working names are
restored from the surviving call sites.  Constant names ``LABEL_DIR`` /
``IMG_DIR`` are presumed from the ``get_dataset(label_dir, img_dir)``
signature — TODO confirm against the upstream script.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Run the augmentation over the configured directories."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE(review): the leading "/" makes these root-relative paths; kept
        # as in the original — verify this is intentional.
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect (image path, box list) pairs for every non-empty label file.

    Each label line is ``class x_center y_center width height``.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue  # skip images without any annotation
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """Flip every image and mirror its box centres.

    Returns (flipped images, updated annotation lists, source paths).
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            # Horizontal flip: mirror the normalized x-centre.
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # Vertical flip: mirror the normalized y-centre.
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase/digit string used as a filename suffix."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
77
"""Max- and average-pooling over square matrices with NumPy.

NOTE(review): the obfuscated original bound every value to the placeholder
``__UpperCAmelCase`` while the loops read the real names (``i``, ``j``,
``mat_i``, ``updated_arr`` ...), so it referenced undefined variables.  The
working names are restored.  The Pillow import was moved into the demo guard
so the pure-NumPy helpers stay importable without the optional dependency.
"""
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Max-pool a square matrix with a ``size``x``size`` window.

    :param arr: square input matrix (anything ``np.array`` accepts)
    :param size: pooling window side length
    :param stride: window step in both directions
    :return: float matrix of shape ((n - size) // stride + 1,) squared
    :raises ValueError: if the input matrix is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Average-pool a square matrix with a ``size``x``size`` window.

    Each window's mean is truncated with ``int(...)`` (original behavior).

    :raises ValueError: if the input matrix is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the (truncated) average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Pillow is only needed for this demo, so import it lazily here.
    from PIL import Image

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
1
# NOTE(review): machine-obfuscated MBart tokenizer test module (a slow/fast
# tokenizer test mixin class plus an English→Romanian integration test class).
# Assignment targets were rewritten to `__UpperCAmelCase`, method names to
# `a_`, class names to `a__`, and arguments to `UpperCamelCase_`, so the real
# names are lost; physical line breaks also fall mid-statement.  The code
# depends on `transformers` test fixtures and cannot be reconstructed safely
# from this view, so it is kept byte-identical below.
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value 
in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: 
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution 
in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) 
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , 
self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
# NOTE(review): machine-obfuscated fast Pegasus tokenizer module
# (`PegasusTokenizerFast`-style class built on PreTrainedTokenizerFast).
# Assignment targets were rewritten to `__UpperCAmelCase`, method names to
# `a_`, the class name to `a__`, and parameters to `UpperCamelCase_`, so the
# real names are lost and line breaks fall mid-statement.  The special-token
# bookkeeping here is intricate (mask_token_sent / offset interplay), so the
# text is kept byte-identical rather than reconstructed.
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in 
range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : 
List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
1
"""Single-bit manipulation helpers.

NOTE(review): in the obfuscated original all five functions shared the name
``_UpperCamelCase`` (each definition shadowing the previous) and their bodies
read ``number``/``position`` although the parameters had been renamed to
``UpperCamelCase`` — i.e. undefined names.  Canonical names are restored.
"""


def set_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` set to 1.

    >>> set_bit(13, 1)
    15
    """
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` cleared to 0.

    >>> clear_bit(18, 1)
    16
    """
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` toggled.

    >>> flip_bit(5, 1)
    7
    """
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` is 1.

    >>> is_bit_set(10, 3)
    True
    """
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at ``position`` as 0 or 1.

    >>> get_bit(26, 1)
    1
    """
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
77
"""Convert TAPAS TensorFlow checkpoints to PyTorch.

NOTE(review): the obfuscated original bound every value to the placeholder
``__UpperCAmelCase``, so ``config``/``model``/``tokenizer`` were never defined
and the per-task hyper-parameter assignments lost their attribute names.  The
names below follow transformers' canonical
``convert_tapas_original_tf_checkpoint_to_pytorch.py`` — the literal values
match the obfuscated text in order, but confirm the attribute names against
that script before relying on them.
"""
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Build the task-specific TAPAS model, load the TF weights, and save it.

    :param task: one of SQA / WTQ / WIKISQL_SUPERVISED / TABFACT / MLM /
        INTERMEDIATE_PRETRAINING
    :param reset_position_index_per_cell: use relative position embeddings
    :param tf_checkpoint_path: path to the TF checkpoint (``...model.ckpt``)
    :param tapas_config_file: JSON config describing the architecture
    :param pytorch_dump_path: output directory for model + tokenizer files
    :raises ValueError: for an unsupported ``task``
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings,
    # make sure to set reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    # The vocab file lives next to the checkpoint: strip "model.ckpt" (10 chars).
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
77
1
"""Convert OpenAI Jukebox checkpoints to the Hugging Face format."""

import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
# Per model: the VQ-VAE plus the three prior checkpoints to download.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Map a single original Jukebox state-dict key to its HF equivalent.

    Order matters: early ``return`` branches are terminal renames, while the
    in-place ``key = key.replace(...)`` branches may compose with later ones.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rewrite all keys of *state_dict* to match the HF ``JukeboxModel`` layout.

    Args:
        state_dict: original (already b/w-expanded) checkpoint weights.
        model_state_dict: target model's state dict, used to validate that each
            renamed key exists and has a matching shape.
        key_prefix: "vqvae" or "priors.N" — the submodule the keys belong to.
        mapping: dict updated in place with new_key -> original_key for audit.

    Returns:
        A new dict with renamed keys.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download, rename and load a Jukebox checkpoint, then save it as HF model.

    Returns the list of converted prior state dicts (the VQ-VAE dict is popped
    and loaded before returning).
    """
    # Download the raw checkpoints if they are not cached locally yet.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            # Expand fairseq-style short suffixes to full parameter names.
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First file is the VQ-VAE; the priors are stored level_0..level_2 but
        # the HF model indexes them in reverse (priors.2 .. priors.0).
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
77
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
1
"""Convert a fairseq M2M-100 checkpoint to a Hugging Face model."""

import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and return the converted HF model."""
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    # Older checkpoints store hparams under "args", newer under "cfg"]["model"].
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # HF ties encoder/decoder embeddings through "shared".
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # BUG FIX: original code referenced the nonexistent attribute
    # ``args.fairseq_pathß`` (stray trailing ß character).
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
77
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
1
"""Sandboxed execution of model-generated programs.

This code is adapted from OpenAI's release
https://github.com/openai/human-eval/blob/master/human_eval/execution.py
"""

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run *check_program* in a subprocess and report whether it passed.

    The program runs in a separate process so that a hang or crash cannot take
    down the caller; the process is killed one second after *timeout*.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """Execute *check_program* under heavy restrictions; append the outcome to *result*."""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up (reliability_guard() nulled these out).
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise TimeoutException if the body runs longer than *seconds* (SIGALRM-based)."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable for the body."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """Run the body inside a fresh temporary directory (cwd switched to it)."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised by time_limit() when the wall-clock budget is exhausted."""

    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to *root* ("." is a no-op)."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive/system APIs before running untrusted code.

    WARNING: this is not a security sandbox — untrusted code should still be
    run inside an isolated container.
    # NOTE(review): the exact attribute list below is reconstructed from the
    # upstream OpenAI human-eval release; the obfuscated source only showed
    # assignment counts — confirm against the original if behavior matters.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
77
"""Tests for the slow and fast MBart tokenizers."""

import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250_004
RO_CODE = 250_020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                              ^ unk: 2 + 1 = 3           unk: 2 + 1 = 3  ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
77
1
"""simple docstring""" from __future__ import annotations def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: # noqa: E741 """simple docstring""" while r - l > 1: __UpperCAmelCase : Dict = (l + r) // 2 if v[m] >= key: __UpperCAmelCase : Optional[int] = m else: __UpperCAmelCase : List[str] = m # noqa: E741 return r def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if len(UpperCamelCase ) == 0: return 0 __UpperCAmelCase : Optional[int] = [0] * len(UpperCamelCase ) __UpperCAmelCase : str = 1 __UpperCAmelCase : List[Any] = v[0] for i in range(1 , len(UpperCamelCase ) ): if v[i] < tail[0]: __UpperCAmelCase : Any = v[i] elif v[i] > tail[length - 1]: __UpperCAmelCase : Optional[Any] = v[i] length += 1 else: __UpperCAmelCase : int = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
77
"""Singly linked list supporting push-to-front and a value swap between two nodes."""
from typing import Any


class Node:
    """A single linked-list node holding arbitrary data."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # next Node, or None at the tail


class LinkedList:
    """Minimal singly linked list."""

    def __init__(self) -> None:
        self.head = None  # first Node, or None when empty

    def print_list(self) -> None:
        """Print every node's data on one line, space separated."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert ``new_data`` at the head of the list (O(1))."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the payloads of the first nodes holding each value.

        No-op when both values are equal or either value is absent.
        Swaps stored data rather than re-linking nodes, so node identity
        and list structure are untouched.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
77
1
"""Numerical integration via the composite trapezoidal rule."""
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the unsigned area between ``fnc`` and the x axis on [x_start, x_end].

    Each of the ``steps`` equal sub-intervals is treated as a trapezoid.
    NOTE: ``abs`` is applied to the *sum* of the two ordinates (as in the
    original), so this is an unsigned area only when the function does not
    change sign inside a sub-interval.

    :param fnc: function to integrate
    :param x_start: lower bound
    :param x_end: upper bound
    :param steps: number of trapezoids; more steps -> better accuracy
    :return: approximate area
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    step = (x_end - x_start) / steps  # loop-invariant, hoisted
    for _ in range(steps):
        x2 = x1 + step
        fx2 = fnc(x2)
        area += abs(fx1 + fx2) * (x2 - x1) / 2
        # Slide the window forward, reusing the already-computed ordinate.
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
77
"""Fetch agent prompt templates: either an inline prompt string or a Hub-hosted file."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file

# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Return the prompt template text for an agent.

    :param prompt_or_repo_id: either the prompt itself, or the id of a Hub
        dataset repo holding the prompt file; ``None`` falls back to the
        default prompts repo.
    :param agent_name: reported in the download user-agent for telemetry.
    :param mode: which template to fetch, ``"run"`` or ``"chat"``.
    :return: the prompt text.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # A string containing any whitespace is the prompt itself, not a repo id.
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
77
1
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() A = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, 
XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True ) -> Optional[Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: __UpperCAmelCase : List[str] = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models ) __UpperCAmelCase : Tuple = config_class.from_json_file(UpperCamelCase ) __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : str = True print(f"Building TensorFlow model from configuration: {config}" ) __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): __UpperCAmelCase : int = cached_file( UpperCamelCase , UpperCamelCase , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: __UpperCAmelCase : Union[str, Any] = load_pytorch_checkpoint_in_tfa_model(UpperCamelCase , UpperCamelCase ) if compare_with_pt_model: __UpperCAmelCase : int = tf_model(tf_model.dummy_inputs , training=UpperCamelCase ) # build the network __UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="cpu" ) __UpperCAmelCase : List[Any] = pt_model_class.from_pretrained( pretrained_model_name_or_path=UpperCamelCase , config=UpperCamelCase , state_dict=UpperCamelCase ) with torch.no_grad(): 
__UpperCAmelCase : Dict = pt_model(**pt_model.dummy_inputs ) __UpperCAmelCase : Optional[int] = pto[0].numpy() __UpperCAmelCase : List[str] = tfo[0].numpy() __UpperCAmelCase : List[Any] = np.amax(np.abs(np_pt - np_tf ) ) print(f"Max absolute difference between models outputs {diff}" ) assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}" # Save pytorch-model print(f"Save TensorFlow model to {tf_dump_path}" ) tf_model.save_weights(UpperCamelCase , save_format="h5" ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , ) -> Any: """simple docstring""" if args_model_type is None: __UpperCAmelCase : Dict = list(MODEL_CLASSES.keys() ) else: __UpperCAmelCase : List[Any] = [args_model_type] for j, model_type in enumerate(UpperCamelCase , start=1 ): print("=" * 100 ) print(f" Converting model type {j}/{len(UpperCamelCase )}: {model_type}" ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." 
) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: __UpperCAmelCase : List[Any] = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: __UpperCAmelCase : int = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(UpperCamelCase , UpperCamelCase ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f" Skipping finetuned checkpoint {model_shortcut_name}" ) continue __UpperCAmelCase : Optional[Any] = model_shortcut_name elif only_convert_finetuned_models: print(f" Skipping not finetuned checkpoint {model_shortcut_name}" ) continue print( f" Converting checkpoint {i}/{len(UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" ) print("-" * 100 ) if config_shortcut_name in aws_config_map: __UpperCAmelCase : Dict = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models ) else: __UpperCAmelCase : Optional[Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: __UpperCAmelCase : int = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models ) else: __UpperCAmelCase : List[str] = model_shortcut_name if os.path.isfile(UpperCamelCase ): __UpperCAmelCase : Tuple = "converted_model" convert_pt_checkpoint_to_tf( model_type=UpperCamelCase , pytorch_checkpoint_path=UpperCamelCase , config_file=UpperCamelCase , tf_dump_path=os.path.join(UpperCamelCase , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=UpperCamelCase , ) if remove_cached_files: os.remove(UpperCamelCase ) os.remove(UpperCamelCase ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the 
output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") A = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, 
model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
77
"""Lazy import structure for the ERNIE model.

Follows the standard transformers sub-package ``__init__`` pattern: the
heavy framework modules are only imported on first attribute access via
``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available

# Modules that are always importable (configuration has no framework deps).
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

# PyTorch modeling objects are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class a__ ( unittest.TestCase ): def __init__( self : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = parent def a_ ( self : Dict): """simple docstring""" return {} def _UpperCamelCase ( ) -> Dict: """simple docstring""" __UpperCAmelCase : List[Any] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" __UpperCAmelCase : List[str] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MarkupLMFeatureExtractor if is_bsa_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : List[str] = MarkupLMFeatureExtractionTester(self) @property def a_ ( self : Tuple): """simple docstring""" return self.feature_extract_tester.prepare_feat_extract_dict() def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.feature_extraction_class() # Test not batched input __UpperCAmelCase : Tuple = get_html_strings()[0] __UpperCAmelCase : Tuple = feature_extractor(UpperCamelCase_) # fmt: off __UpperCAmelCase : Dict = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 
pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] __UpperCAmelCase : List[Any] = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , UpperCamelCase_) self.assertEqual(encoding.xpaths , UpperCamelCase_) # Test batched __UpperCAmelCase : Optional[int] = get_html_strings() __UpperCAmelCase : str = feature_extractor(UpperCamelCase_) # fmt: off __UpperCAmelCase : Optional[int] = expected_nodes + [["My First Heading", "My first paragraph."]] __UpperCAmelCase : int = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes) , 2) self.assertEqual(len(encoding.xpaths) , 2) self.assertEqual(encoding.nodes , UpperCamelCase_) self.assertEqual(encoding.xpaths , UpperCamelCase_)
77
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) 
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed __UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : 
Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: A = None A = logging.get_logger(__name__) A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } A = { """camembert-base""": 512, } A = """▁""" class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ["input_ids", "attention_mask"] lowercase_ = CamembertTokenizer def __init__( self : Optional[int] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Union[str, Any]="</s>" , UpperCamelCase_ : Any="<s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : Union[str, Any]="<mask>" , UpperCamelCase_ : Any=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase_ : Tuple , ): """simple docstring""" __UpperCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , 
**UpperCamelCase_ , ) __UpperCAmelCase : List[str] = vocab_file __UpperCAmelCase : Union[str, Any] = False if not self.vocab_file else True def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] __UpperCAmelCase : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.sep_token_id] __UpperCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : int = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
"""Count distinct binary trees and binary search trees on a given number of nodes."""


def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) using the iterative multiply/divide scheme (exact integers)."""
    result = 1  # accumulates the coefficient
    # Since C(n, k) = C(n, n - k), use the smaller k to shorten the loop.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k): dividing by (i + 1) each step keeps intermediates integral.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for n >= 0; raise ValueError for negative input."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of labeled binary trees: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
77
1
"""Fine-tune a pretrained audio model (e.g. Wav2Vec2) for audio classification.

NOTE(review): this file has been mechanically renamed — every local is
`__UpperCAmelCase`, every function `_UpperCamelCase`, every dataclass field
`lowercase_` — so as written it cannot run: duplicate parameter names are a
SyntaxError, later bindings clobber earlier ones, and names such as
`sample_rate`, `wav`, `parser`, `raw_datasets`, and `main` are read but never
bound. The comments below describe the evident intent; they must be verified
against the canonical transformers example before any behavioral change.
"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


A = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")


def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1_6000 ) -> List[Any]:
    """Randomly crop `wav` to at most `max_length` seconds at `sample_rate` — TODO confirm against upstream."""
    __UpperCAmelCase : int = int(round(sample_rate * max_length ) )
    if len(UpperCamelCase ) <= sample_length:
        return wav
    __UpperCAmelCase : int = randint(0 , len(UpperCamelCase ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]


# Presumably DataTrainingArguments: dataset/split/column selection knobs.
@dataclass
class a__ :
    lowercase_ = field(default=__magic_name__ , metadata={"help": "Name of a dataset from the datasets package"} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "A file containing the training audio paths and labels."} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "A file containing the validation audio paths and labels."} )
    lowercase_ = field(
        default="train" ,
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    lowercase_ = field(
        default="validation" ,
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    lowercase_ = field(
        default="audio" ,
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
    lowercase_ = field(
        default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
    lowercase_ = field(
        default=__magic_name__ ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    lowercase_ = field(
        default=__magic_name__ ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    lowercase_ = field(
        default=2_0 ,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )


# Presumably ModelArguments: checkpoint / freezing / auth options.
@dataclass
class a__ :
    lowercase_ = field(
        default="facebook/wav2vec2-base" ,
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
    lowercase_ = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Name or path of preprocessor config."} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
    lowercase_ = field(
        default=__magic_name__ ,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    lowercase_ = field(
        default=__magic_name__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    lowercase_ = field(
        default=__magic_name__ ,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )

    def a_ ( self : Optional[int]):
        """Deprecation shim: map the old --freeze_feature_extractor flag onto --freeze_feature_encoder."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`." , UpperCamelCase_ , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`.")


def _UpperCamelCase ( ) -> Any:
    """Entry point: parse args, load data, build the model, then train/evaluate."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , UpperCamelCase , UpperCamelCase )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    __UpperCAmelCase : List[str] = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase )
    transformers.utils.logging.set_verbosity(UpperCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(f"Training/evaluation parameters {training_args}" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Detecting last checkpoint.
    __UpperCAmelCase : str = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Initialize our dataset and prepare it for the audio classification task.
    __UpperCAmelCase : Optional[int] = DatasetDict()
    __UpperCAmelCase : Union[str, Any] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    __UpperCAmelCase : Tuple = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names )}." )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names )}." )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    __UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    __UpperCAmelCase : List[str] = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )

    __UpperCAmelCase : Tuple = feature_extractor.model_input_names[0]

    def train_transforms(UpperCamelCase ):
        # Randomly subsample each training clip before feature extraction.
        __UpperCAmelCase : Optional[int] = []
        for audio in batch[data_args.audio_column_name]:
            __UpperCAmelCase : int = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(UpperCamelCase )
        __UpperCAmelCase : Any = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
        __UpperCAmelCase : Any = {model_input_name: inputs.get(UpperCamelCase )}
        __UpperCAmelCase : int = list(batch[data_args.label_column_name] )
        return output_batch

    def val_transforms(UpperCamelCase ):
        # Validation uses the full clips, no random cropping.
        __UpperCAmelCase : Any = [audio["array"] for audio in batch[data_args.audio_column_name]]
        __UpperCAmelCase : List[Any] = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
        __UpperCAmelCase : Union[str, Any] = {model_input_name: inputs.get(UpperCamelCase )}
        __UpperCAmelCase : List[Any] = list(batch[data_args.label_column_name] )
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    __UpperCAmelCase : Tuple = raw_datasets["train"].features[data_args.label_column_name].names
    __UpperCAmelCase , __UpperCAmelCase : List[Any] = {}, {}
    for i, label in enumerate(UpperCamelCase ):
        __UpperCAmelCase : List[str] = str(UpperCamelCase )
        __UpperCAmelCase : str = label

    # Load the accuracy metric from the datasets package
    __UpperCAmelCase : Union[str, Any] = evaluate.load("accuracy" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(UpperCamelCase ):
        __UpperCAmelCase : str = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=UpperCamelCase , references=eval_pred.label_ids )

    __UpperCAmelCase : Dict = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel=UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __UpperCAmelCase : Tuple = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            __UpperCAmelCase : Any = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) )
        # Set the training transforms
        raw_datasets["train"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            __UpperCAmelCase : Union[str, Any] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )

    # Initialize our trainer
    __UpperCAmelCase : Optional[int] = Trainer(
        model=UpperCamelCase , args=UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=UpperCamelCase , tokenizer=UpperCamelCase , )

    # Training
    if training_args.do_train:
        __UpperCAmelCase : List[Any] = None
        if training_args.resume_from_checkpoint is not None:
            __UpperCAmelCase : Any = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __UpperCAmelCase : Tuple = last_checkpoint
        __UpperCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=UpperCamelCase )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        __UpperCAmelCase : Union[str, Any] = trainer.evaluate()
        trainer.log_metrics("eval" , UpperCamelCase )
        trainer.save_metrics("eval" , UpperCamelCase )

    # Write model card and (optionally) push to hub
    __UpperCAmelCase : Any = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase )
    else:
        trainer.create_model_card(**UpperCamelCase )


if __name__ == "__main__":
    main()
77
"""Lazy import structure for the TrOCR model (configuration, processor, and PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# NOTE(review): mechanical renaming has collapsed distinct module-level names
# into a single `A`, so each assignment below clobbers the previous one and
# `_import_structure` (read by `_LazyModule` at the bottom) is never defined.
# The evident intent is the standard transformers lazy-module boilerplate —
# confirm against the canonical trocr/__init__.py before changing behavior.
A = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

try:
    # Torch-backed modeling symbols are only registered when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    # At runtime the module is replaced with a lazy loader that imports
    # submodules only on first attribute access.
    import sys

    A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""Matrix-chain multiplication: minimal scalar multiplications and optimal parenthesization."""
import sys


def matrix_chain_order(array):
    """Dynamic program over split points.

    `array` holds the chain dimensions: matrix i is array[i-1] x array[i].
    Returns (matrix, sol) where matrix[a][b] is the minimal multiplication
    cost for the sub-chain a..b and sol[a][b] the optimal split index.
    """
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization for matrices i..j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """Demo on the classic CLRS instance."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
77
"""Tests for the EfficientFormer image processor (which reuses ViTImageProcessor)."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the knob values the tests below feed into the image processor."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size for resizing in the tests below.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        """The processor exposes all configured attributes."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        """PIL inputs: single image and batch produce correctly shaped pixel_values."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        """NumPy inputs: same shape checks as the PIL case."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        """PyTorch tensor inputs: same shape checks as the PIL case."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
77
1
"""Project Euler Problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2).

    Uses pure integer arithmetic (the original float `math.pow` loses
    precision for large n).
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
77
"""Convert a volume between metric/imperial units via a cubic-meter pivot."""
from collections import namedtuple

# from_: factor to cubic meters; to: factor from cubic meters to the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def vol_conversion(value: float, from_type: str, to_type: str) -> float:
    """Return `value` converted from `from_type` to `to_type`.

    Raises ValueError when either unit is not in METRIC_CONVERSION.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic meters, then to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
77
1
"""Integration tests for image-processor Hub interactions (offline cache behavior and push_to_hub).

NOTE(review): mechanical renaming has collapsed names in this file: every
local is `__UpperCAmelCase` (so intermediate results are immediately
clobbered), every test method is `a_` (so unittest will not discover or run
them), and the annotations on `self` are meaningless. The comments below
describe the evident intent only.
"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / """utils"""))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


A = get_tests_dir("""fixtures""")


class a__ ( unittest.TestCase ):
    def a_ ( self : Optional[Any]):
        """Cached models should still load when the Hub answers HTTP 500."""
        # Build a fake response object for the mocked HEAD request.
        __UpperCAmelCase : Tuple = mock.Mock()
        __UpperCAmelCase : str = 500
        __UpperCAmelCase : Optional[Any] = {}
        __UpperCAmelCase : int = HTTPError
        __UpperCAmelCase : Dict = {}

        # Download this model to make sure it's in the cache.
        __UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=UpperCamelCase_) as mock_head:
            __UpperCAmelCase : int = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def a_ ( self : List[Any]):
        """Loading directly from a full config URL should work."""
        __UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def a_ ( self : int):
        """Configs stored in a subfolder require the `subfolder` argument."""
        with self.assertRaises(UpperCamelCase_):
            # config is in subfolder, the following should not work without specifying the subfolder
            __UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        __UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor")

        self.assertIsNotNone(UpperCamelCase_)


@is_staging_test
class a__ ( unittest.TestCase ):
    @classmethod
    def a_ ( cls : List[str]):
        """setUpClass: log in with the staging test token."""
        __UpperCAmelCase : Optional[Any] = TOKEN
        HfFolder.save_token(UpperCamelCase_)

    @classmethod
    def a_ ( cls : int):
        """tearDownClass: best-effort cleanup of the repos created by the tests."""
        try:
            delete_repo(token=cls._token , repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def a_ ( self : Union[str, Any]):
        """Round-trip push_to_hub / from_pretrained under the user namespace."""
        __UpperCAmelCase : int = ViTImageProcessor.from_pretrained(UpperCamelCase_)
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token)

        __UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_))

        # Reset repo
        delete_repo(token=self._token , repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                UpperCamelCase_ , repo_id="test-image-processor" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token)

        __UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_))

    def a_ ( self : Tuple):
        """Same round-trip under an organization namespace."""
        __UpperCAmelCase : Any = ViTImageProcessor.from_pretrained(UpperCamelCase_)
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token)

        __UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_))

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                UpperCamelCase_ , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token)

        __UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_))

    def a_ ( self : Optional[int]):
        """Dynamic (trust_remote_code) image processors round-trip with auto_map set."""
        CustomImageProcessor.register_for_auto_class()
        __UpperCAmelCase : int = CustomImageProcessor.from_pretrained(UpperCamelCase_)

        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )

        __UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor" , trust_remote_code=UpperCamelCase_)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor")
77
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : 
Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
1
"""simple docstring""" from __future__ import annotations A = [] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> bool: """simple docstring""" for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> bool: """simple docstring""" if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Any = 1 solve(UpperCamelCase , row + 1 ) __UpperCAmelCase : Dict = 0 return False def _UpperCamelCase ( UpperCamelCase ) -> None: """simple docstring""" for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) A = 8 A = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("""The total no. of solutions are :""", len(solution))
77
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , 
num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] 
= mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = KandinskyVaaInpaintPipeline lowercase_ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] lowercase_ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] lowercase_ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Dict): """simple docstring""" return 32 @property def a_ ( self : str): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return self.time_input_dim @property def a_ ( self : Any): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : Union[str, Any]): """simple docstring""" return 100 @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Dict = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": 
"image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __UpperCAmelCase : str = UNetaDConditionModel(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a_ ( self : str): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : int = VQModel(**self.dummy_movq_kwargs) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : int = self.dummy_unet __UpperCAmelCase : Tuple = self.dummy_movq __UpperCAmelCase : Any = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=UpperCamelCase_ , ) __UpperCAmelCase : List[str] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def a_ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict=0): """simple docstring""" __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_)).to(UpperCamelCase_) __UpperCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( UpperCamelCase_) # create init_image __UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_)).to(UpperCamelCase_) __UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1)[0] __UpperCAmelCase : Union[str, Any] = 
Image.fromarray(np.uinta(UpperCamelCase_)).convert("RGB").resize((256, 256)) # create mask __UpperCAmelCase : Any = np.ones((64, 64) , dtype=np.floataa) __UpperCAmelCase : str = 0 if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : int = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : List[Any] = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[str] = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = "cpu" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : int = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : List[str] = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Any = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Any = output.images __UpperCAmelCase : int = pipe( **self.get_dummy_inputs(UpperCamelCase_) , return_dict=UpperCamelCase_ , )[0] __UpperCAmelCase : int = image[0, -3:, -3:, -1] __UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1] print(F"image.shape {image.shape}") assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Union[str, Any] = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def a_ ( self : List[Any]): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu 
class a__ ( unittest.TestCase ): def a_ ( self : Tuple): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy") __UpperCAmelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png") __UpperCAmelCase : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa) __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : Optional[int] = "a hat" __UpperCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(UpperCamelCase_) __UpperCAmelCase : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa) __UpperCAmelCase : str = pipeline.to(UpperCamelCase_) pipeline.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = torch.Generator(device="cpu").manual_seed(0) __UpperCAmelCase , __UpperCAmelCase : Dict = pipe_prior( UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __UpperCAmelCase : Optional[Any] = pipeline( image=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) __UpperCAmelCase : str = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] 
, UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if 
token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
1
"""simple docstring""" import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class a__ ( unittest.TestCase ): lowercase_ = inspect.getfile(accelerate.test_utils ) lowercase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) lowercase_ = ["accelerate", "launch"] lowercase_ = Path.home() / ".cache/huggingface/accelerate" lowercase_ = "default_config.yaml" lowercase_ = config_folder / config_file lowercase_ = config_folder / "_default_config.yaml" lowercase_ = Path("tests/test_configs" ) @classmethod def a_ ( cls : Dict): """simple docstring""" if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def a_ ( cls : List[str]): """simple docstring""" if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def a_ ( self : str): """simple docstring""" for config in sorted(self.test_config_path.glob("**/*.yaml")): with self.subTest(config_file=UpperCamelCase_): execute_subprocess_async( self.base_cmd + ["--config_file", str(UpperCamelCase_), self.test_file_path] , env=os.environ.copy()) def a_ ( self : int): """simple docstring""" execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy()) class a__ ( unittest.TestCase ): lowercase_ = "test-tpu" lowercase_ = "us-central1-a" lowercase_ = "ls" lowercase_ = ["accelerate", "tpu-config"] lowercase_ = "cd /usr/share" lowercase_ = "tests/test_samples/test_command_file.sh" lowercase_ = "Running gcloud compute tpus tpu-vm ssh" def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = run_command( 
self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCamelCase_ , ) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCamelCase_ , ) def a_ ( self : str): """simple docstring""" __UpperCAmelCase : List[str] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=UpperCamelCase_) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCamelCase_ , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : List[Any] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCamelCase_ , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", "echo \"Hello World\"", "--debug", ] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = run_command( self.cmd + ["--config_file", 
"tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCamelCase_ , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : str = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCamelCase_ , ) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Any = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCamelCase_ , ) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--accelerate_version", "12.0.0", "--debug", ] , return_stdout=UpperCamelCase_ , ) self.assertIn( F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCamelCase_ , )
77
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): __UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor 
__UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
1
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = 
TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
77
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... 
return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = 
spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
77
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = 
self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] 
= self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer A = logging.get_logger(__name__) A = {"""vocab_file""": """vocab.txt"""} A = { """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } A = { """YituTech/conv-bert-base""": 512, """YituTech/conv-bert-medium-small""": 512, """YituTech/conv-bert-small""": 512, } A = { """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ConvBertTokenizer def __init__( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[int]="[UNK]" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Dict="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : Any , ): """simple docstring""" super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , 
strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCamelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase_) != tokenize_chinese_chars ): __UpperCAmelCase : Tuple = getattr(UpperCamelCase_ , normalizer_state.pop("type")) __UpperCAmelCase : int = do_lower_case __UpperCAmelCase : Any = strip_accents __UpperCAmelCase : List[str] = tokenize_chinese_chars __UpperCAmelCase : Any = normalizer_class(**UpperCamelCase_) __UpperCAmelCase : List[str] = do_lower_case def a_ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]=None): """simple docstring""" __UpperCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a_ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_) return tuple(UpperCamelCase_)
77
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return 
DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: 
{num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or 
TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer A = logging.get_logger(__name__) A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""", """bert-base-multilingual-uncased""": ( """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt""" ), """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt""" ), """bert-base-cased-finetuned-mrpc""": ( """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt""" ), """bert-base-german-dbmdz-cased""": 
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""", """bert-base-german-dbmdz-uncased""": ( """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt""" ), """wietsedv/bert-base-dutch-cased""": ( """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""", """bert-base-multilingual-uncased""": ( """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json""" ), """bert-base-multilingual-cased""": ( """https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json""" ), """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json""" ), 
"""bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json""" ), """bert-base-cased-finetuned-mrpc""": ( """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json""" ), """bert-base-german-dbmdz-cased""": ( """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json""" ), """bert-base-german-dbmdz-uncased""": ( """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json""" ), """wietsedv/bert-base-dutch-cased""": ( """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json""" ), }, } A = { """bert-base-uncased""": 512, """bert-large-uncased""": 512, """bert-base-cased""": 512, """bert-large-cased""": 512, """bert-base-multilingual-uncased""": 512, """bert-base-multilingual-cased""": 512, """bert-base-chinese""": 512, """bert-base-german-cased""": 512, """bert-large-uncased-whole-word-masking""": 512, """bert-large-cased-whole-word-masking""": 512, """bert-large-uncased-whole-word-masking-finetuned-squad""": 512, """bert-large-cased-whole-word-masking-finetuned-squad""": 512, """bert-base-cased-finetuned-mrpc""": 512, """bert-base-german-dbmdz-cased""": 512, """bert-base-german-dbmdz-uncased""": 512, """TurkuNLP/bert-base-finnish-cased-v1""": 512, """TurkuNLP/bert-base-finnish-uncased-v1""": 512, """wietsedv/bert-base-dutch-cased""": 512, } A = { """bert-base-uncased""": {"""do_lower_case""": True}, """bert-large-uncased""": {"""do_lower_case""": True}, """bert-base-cased""": {"""do_lower_case""": False}, """bert-large-cased""": {"""do_lower_case""": False}, 
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True}, """bert-base-multilingual-cased""": {"""do_lower_case""": False}, """bert-base-chinese""": {"""do_lower_case""": False}, """bert-base-german-cased""": {"""do_lower_case""": False}, """bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True}, """bert-large-cased-whole-word-masking""": {"""do_lower_case""": False}, """bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True}, """bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False}, """bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False}, """bert-base-german-dbmdz-cased""": {"""do_lower_case""": False}, """bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True}, """TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False}, """TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True}, """wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False}, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = BertTokenizer def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Union[str, Any]="[UNK]" , UpperCamelCase_ : Union[str, Any]="[SEP]" , UpperCamelCase_ : List[Any]="[PAD]" , UpperCamelCase_ : List[Any]="[CLS]" , UpperCamelCase_ : Tuple="[MASK]" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , 
**UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , UpperCamelCase_) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase_) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase_) != tokenize_chinese_chars ): __UpperCAmelCase : Dict = getattr(UpperCamelCase_ , normalizer_state.pop("type")) __UpperCAmelCase : Any = do_lower_case __UpperCAmelCase : List[str] = strip_accents __UpperCAmelCase : Any = tokenize_chinese_chars __UpperCAmelCase : Union[str, Any] = normalizer_class(**UpperCamelCase_) __UpperCAmelCase : List[Any] = do_lower_case def a_ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=None): """simple docstring""" __UpperCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a_ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_) return tuple(UpperCamelCase_)
77
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
1
"""simple docstring""" import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class a__ ( unittest.TestCase ): def a_ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Union[str, Any] = jnp.ones((batch_size, length)) / length return scores def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[Any] = None __UpperCAmelCase : int = 20 __UpperCAmelCase : int = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase_) # tweak scores to not be uniform anymore __UpperCAmelCase : List[Any] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch __UpperCAmelCase : int = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch # compute softmax __UpperCAmelCase : List[str] = jax.nn.softmax(UpperCamelCase_ , axis=-1) __UpperCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5) __UpperCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3) __UpperCAmelCase : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase_ , scores.copy() , cur_len=UpperCamelCase_) , axis=-1) __UpperCAmelCase : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase_ , scores.copy() , cur_len=UpperCamelCase_) , axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max()) 
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min()) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : int = None __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : Any = 2 # create ramp distribution __UpperCAmelCase : Tuple = np.broadcast_to(np.arange(UpperCamelCase_)[None, :] , (batch_size, vocab_size)).copy() __UpperCAmelCase : Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size __UpperCAmelCase : str = FlaxTopKLogitsWarper(3) __UpperCAmelCase : Union[str, Any] = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True]) # check special case __UpperCAmelCase : Tuple = 5 __UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3) __UpperCAmelCase : str = np.broadcast_to(np.arange(UpperCamelCase_)[None, :] , (batch_size, length)).copy() __UpperCAmelCase : str = top_k_warp_safety_check(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Any = None __UpperCAmelCase : Optional[int] = 10 __UpperCAmelCase : Dict = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) __UpperCAmelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) __UpperCAmelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8) __UpperCAmelCase : int = np.exp(top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_)) # dist should be filtered 
to keep min num values so that sum is >= top_p # exp (-inf) => 0 __UpperCAmelCase : Optional[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3)) # check edge cases with negative and extreme logits __UpperCAmelCase : Optional[Any] = np.broadcast_to(np.arange(UpperCamelCase_)[None, :] , (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme __UpperCAmelCase : int = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept __UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0) __UpperCAmelCase : int = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 20 __UpperCAmelCase : Optional[Any] = 4 __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_) # check that min length is applied at length 5 __UpperCAmelCase : List[Any] = ids_tensor((batch_size, 20) , vocab_size=20) __UpperCAmelCase : int = 5 __UpperCAmelCase : Any = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : int = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 __UpperCAmelCase : List[str] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Dict = 15 __UpperCAmelCase : Optional[int] = min_dist_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertFalse(jnp.isinf(UpperCamelCase_).any()) def a_ ( self : 
Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = 20 __UpperCAmelCase : Optional[int] = 4 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_) # check that all scores are -inf except the bos_token_id score __UpperCAmelCase : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20) __UpperCAmelCase : Union[str, Any] = 1 __UpperCAmelCase : List[Any] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 __UpperCAmelCase : List[Any] = 3 __UpperCAmelCase : str = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertFalse(jnp.isinf(UpperCamelCase_).any()) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : List[Any] = 20 __UpperCAmelCase : List[str] = 4 __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : List[Any] = 5 __UpperCAmelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_) # check that all scores are -inf except the eos_token_id when max_length is reached __UpperCAmelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20) __UpperCAmelCase : Optional[int] = 4 __UpperCAmelCase : int = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : int = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should 
be zero # check that eos_token_id is not forced if max_length is not reached __UpperCAmelCase : Dict = 3 __UpperCAmelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Dict = logits_processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) self.assertFalse(jnp.isinf(UpperCamelCase_).any()) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[Any] = 10 __UpperCAmelCase : Any = 15 __UpperCAmelCase : List[str] = 2 __UpperCAmelCase : Union[str, Any] = 1 __UpperCAmelCase : Tuple = 15 # dummy input_ids and scores __UpperCAmelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase_) __UpperCAmelCase : Tuple = input_ids.copy() __UpperCAmelCase : Dict = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = scores.copy() # instantiate all dist processors __UpperCAmelCase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5) __UpperCAmelCase : List[str] = FlaxTopKLogitsWarper(3) __UpperCAmelCase : str = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors __UpperCAmelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_) __UpperCAmelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_) __UpperCAmelCase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_) __UpperCAmelCase : Any = 10 # no processor list __UpperCAmelCase : List[str] = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : Optional[int] = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : int = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : Optional[int] = bos_dist_proc(UpperCamelCase_ 
, UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : Optional[int] = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) # with processor list __UpperCAmelCase : str = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]) __UpperCAmelCase : Optional[int] = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) # scores should be equal self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist()) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : str = 10 __UpperCAmelCase : Optional[int] = 15 __UpperCAmelCase : int = 2 __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : Dict = 15 # dummy input_ids and scores __UpperCAmelCase : Any = ids_tensor((batch_size, sequence_length) , UpperCamelCase_) __UpperCAmelCase : Tuple = input_ids.copy() __UpperCAmelCase : str = self._get_uniform_logits(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : int = scores.copy() # instantiate all dist processors __UpperCAmelCase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5) __UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3) __UpperCAmelCase : str = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors __UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase_) __UpperCAmelCase : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase_) __UpperCAmelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase_ , eos_token_id=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = 10 # no processor list def run_no_processor_list(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict): __UpperCAmelCase : Any = temp_dist_warp(UpperCamelCase_ , UpperCamelCase_ , 
cur_len=UpperCamelCase_) __UpperCAmelCase : List[str] = top_k_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : int = top_p_warp(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : int = min_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : int = bos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) __UpperCAmelCase : Any = eos_dist_proc(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) return scores # with processor list def run_processor_list(UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict): __UpperCAmelCase : List[Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]) __UpperCAmelCase : Optional[Any] = processor(UpperCamelCase_ , UpperCamelCase_ , cur_len=UpperCamelCase_) return scores __UpperCAmelCase : str = jax.jit(UpperCamelCase_) __UpperCAmelCase : List[str] = jax.jit(UpperCamelCase_) __UpperCAmelCase : List[Any] = jitted_run_no_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Dict = jitted_run_processor_list(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # scores should be equal self.assertTrue(jnp.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
77
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if 
the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
1
"""simple docstring""" A = 0 # The first color of the flag. A = 1 # The second color of the flag. A = 2 # The third color of the flag. A = (red, white, blue) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if not sequence: return [] if len(UpperCamelCase ) == 1: return list(UpperCamelCase ) __UpperCAmelCase : int = 0 __UpperCAmelCase : str = len(UpperCamelCase ) - 1 __UpperCAmelCase : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: __UpperCAmelCase , __UpperCAmelCase : List[str] = sequence[high], sequence[mid] high -= 1 else: __UpperCAmelCase : Optional[int] = f"The elements inside the sequence must contains only {colors} values" raise ValueError(UpperCamelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by commas:\n""").strip() A = [int(item.strip()) for item in user_input.split(""",""")] print(f'''{dutch_national_flag_sort(unsorted)}''')
77
"""Fast (Rust-backed) tokenization class for PEGASUS."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast PEGASUS tokenizer backed by HuggingFace *tokenizers*.

    Reserves `offset` low ids for special tokens: pad/eos/mask_token_sent plus
    `<unk_2>` ... `<unk_{offset-1}>` filler tokens, mirroring the slow
    `PegasusTokenizer`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        # Number of token ids reserved at the bottom of the vocab for special tokens.
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            # Prepend the sentence-mask token unless the caller already included it.
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        """Return a 0/1 mask marking the special-token positions of `seq`."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special (e.g. [eos]/[pad]) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            # the trailing [1] accounts for the eos token appended by build_inputs_with_special_tokens
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`; returns the written path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
77
1
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class a__ ( unittest.TestCase ): def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 
20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __UpperCAmelCase : Tuple = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __UpperCAmelCase : List[str] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above __UpperCAmelCase : Any = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4) __UpperCAmelCase : List[Any] = output[output != -float("inf")] __UpperCAmelCase : Tuple = tf.cast( tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1e-12) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_) @require_tf class a__ ( unittest.TestCase , __magic_name__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): lowercase_ = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq, "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM, "AutoModelForVision2Seq": TFAutoModelForVisionaSeq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") __UpperCAmelCase : str = 2 __UpperCAmelCase : int = 2 class a__ ( tf.Module ): def __init__( self : List[Any] , UpperCamelCase_ : Tuple): 
"""simple docstring""" super(UpperCamelCase_ , self).__init__() __UpperCAmelCase : Optional[Any] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"), ) , jit_compile=UpperCamelCase_ , ) def a_ ( self : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[str]): """simple docstring""" __UpperCAmelCase : Optional[int] = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} __UpperCAmelCase : str = [[2, 0], [102, 103]] __UpperCAmelCase : str = [[1, 0], [1, 1]] __UpperCAmelCase : str = DummyModel(model=UpperCamelCase_) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={"serving_default": dummy_model.serving}) __UpperCAmelCase : List[str] = tf.saved_model.load(UpperCamelCase_).signatures["serving_default"] for batch_size in range(1 , len(UpperCamelCase_) + 1): __UpperCAmelCase : Optional[int] = { "input_ids": tf.constant(dummy_input_ids[:batch_size]), "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), } __UpperCAmelCase : str = serving_func(**UpperCamelCase_)["sequences"] __UpperCAmelCase : str = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_) @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : List[Any] = 2 class a__ ( tf.Module ): def __init__( self : Union[str, Any] , UpperCamelCase_ : Any): """simple docstring""" super(UpperCamelCase_ , self).__init__() __UpperCAmelCase : int = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , 
name="input_ids"), tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"), ) , jit_compile=UpperCamelCase_ , ) def a_ ( self : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} __UpperCAmelCase : Union[str, Any] = [[2], [102, 103]] __UpperCAmelCase : str = [[1], [1, 1]] __UpperCAmelCase : Optional[int] = DummyModel(model=UpperCamelCase_) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={"serving_default": dummy_model.serving}) __UpperCAmelCase : Tuple = tf.saved_model.load(UpperCamelCase_).signatures["serving_default"] for input_row in range(len(UpperCamelCase_)): __UpperCAmelCase : Dict = { "input_ids": tf.constant([dummy_input_ids[input_row]]), "attention_mask": tf.constant([dummy_attention_masks[input_row]]), } __UpperCAmelCase : Dict = serving_func(**UpperCamelCase_)["sequences"] __UpperCAmelCase : int = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_) @slow @require_tensorflow_text def a_ ( self : Any): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCamelCase_) class a__ ( tf.keras.layers.Layer ): def __init__( self : Tuple): """simple docstring""" super().__init__() __UpperCAmelCase : Dict = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , "spiece.model") , "rb").read()) __UpperCAmelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") def a_ ( self : List[Any] , UpperCamelCase_ : Tuple , 
*UpperCamelCase_ : Dict , **UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = self.tokenizer.tokenize(UpperCamelCase_) __UpperCAmelCase , __UpperCAmelCase : List[str] = text.pad_model_inputs( UpperCamelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id) __UpperCAmelCase : int = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_) return self.tokenizer.detokenize(UpperCamelCase_) __UpperCAmelCase : Dict = CompleteSentenceTransformer() __UpperCAmelCase : List[str] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs") __UpperCAmelCase : Optional[Any] = complete_model(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_) keras_model.save(UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Tuple = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } __UpperCAmelCase : List[str] = 14 __UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") __UpperCAmelCase : List[str] = "Hello, my dog is cute and" __UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors="tf") __UpperCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") __UpperCAmelCase : Optional[int] = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(0) __UpperCAmelCase : List[Any] = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_) self.assertTrue(expectation == len(generated_tokens[0])) __UpperCAmelCase : Dict = [638, 198] with tf.device(":/CPU:0"): tf.random.set_seed(0) __UpperCAmelCase : List[Any] = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_) self.assertTrue(expectation == len(generated_tokens[0])) def a_ ( self : Optional[Any]): """simple 
docstring""" __UpperCAmelCase : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") __UpperCAmelCase : Dict = "Hugging Face is a technology company based in New York and Paris." __UpperCAmelCase : Tuple = bart_tokenizer(UpperCamelCase_ , return_tensors="tf").input_ids __UpperCAmelCase : List[str] = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart") __UpperCAmelCase : int = bart_model.generate(UpperCamelCase_).numpy() class a__ ( __magic_name__ ): def a_ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : int): """simple docstring""" return super().call(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : List[Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart") __UpperCAmelCase : Union[str, Any] = bart_model.generate(UpperCamelCase_ , foo="bar").numpy() self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_)) class a__ ( bart_model.model.encoder.__class__ ): def a_ ( self : Dict , UpperCamelCase_ : int , **UpperCamelCase_ : Dict): """simple docstring""" return super().call(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : Dict = FakeEncoder(bart_model.config , bart_model.model.shared) __UpperCAmelCase : str = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __UpperCAmelCase : List[str] = bart_model.generate(UpperCamelCase_).numpy() with self.assertRaises(UpperCamelCase_): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase_ , foo="bar")
77
"""Convert a TAPAS TensorFlow checkpoint to a PyTorch model + tokenizer files."""
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Build the task-specific TAPAS model, load the TF weights and save model + tokenizer.

    Args:
        task: one of "SQA", "WTQ", "WIKISQL_SUPERVISED", "TABFACT", "MLM",
            "INTERMEDIATE_PRETRAINING".
        reset_position_index_per_cell: whether relative position embeddings are used.
        tf_checkpoint_path: path to the TF checkpoint (".../model.ckpt").
        tapas_config_file: JSON config of the pre-trained model.
        pytorch_dump_path: output directory for the PyTorch model and tokenizer.

    Raises:
        ValueError: if `task` is not one of the supported tasks.
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files; the vocab file sits next to the checkpoint ("model.ckpt" is 10 chars)
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
77
1
"""Prim's minimum-spanning-tree algorithm, in O(V^2) and O(E log V) variants."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex keyed for Prim's algorithm (`key` = best edge weight seen)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # current best connection weight; set by prim()/prim_heap()
        self.pi = None  # parent vertex in the MST
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the closest fringe vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record `vertex` as adjacent."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to `vertex`."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected edge of weight `edge` between vertices `a` and `b` (1-indexed)."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Run Prim's algorithm with a linear-scan fringe (O(V^2)).

    Returns a list of MST edges as (child_id + 1, parent_id + 1) tuples for
    every vertex except the first one in `graph`.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)  # closest fringe vertex; O(V) scan
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Run Prim's algorithm with a binary heap (O(E log V)).

    Yields MST edges as (child_id + 1, parent_id + 1) tuples.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # keys changed behind the heap's back -> restore the invariant
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-driven examples."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
77
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
1
"""Tests for the Shap-E text-to-3D pipeline."""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
77
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
1
"""Stalebot for huggingface/diffusers: closes, revives, or labels inactive issues."""
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are never auto-staled/closed.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    """Scan all open issues and apply the stale/close/reopen policy."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # newest comment first
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): get_labels() yields label objects, so this membership test
            # relies on the library's label/str comparison — confirm against PyGithub.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
77
"""Tests for the MBart tokenizer (slow + fast) and the en-ro checkpoint integration."""
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# mBART-50 fairseq language-code ids (offset past the sentencepiece vocab).
EN_CODE = 250_004
RO_CODE = 250_020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        """Tokenize, round-trip ids<->tokens, and check <unk> handling on the fixture vocab."""
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                                              unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        """Round-trip save/load parity between the slow and fast tokenizers (all legacy formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    """Integration checks against the facebook/mbart-large-en-ro checkpoint."""

    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Truncated sequence still ends with EOS followed by the source language code.
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
77
1
"""Utilities for the RAG research example: dataset, batching, metrics and config helpers."""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to `max_length`."""
    # BART requires add_prefix_space unless the line already starts with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by `pad_token_id`."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Line-by-line seq2seq dataset backed by `<type_path>.source` / `<type_path>.target` files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG wraps two tokenizers (question encoder + generator).
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in `data_file` (used to detect empty lines)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim padding columns shared by the whole batch."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    """Flatten one level of nesting."""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    """Collect repo id/sha/branch plus hostname for experiment logging."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and reference."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Mean exact-match over paired output/reference lines."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    """Move hparams that the model config understands onto the config, mapping T5 aliases."""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
77
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
1
"""Prior transformer model (as used by unCLIP/Kandinsky) predicting CLIP image embeddings."""
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """Transformer that denoises a CLIP image embedding conditioned on text embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Fall back to inner/embedding dims when the optional overrides are not given.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        # NOTE(review): second positional arg of Timesteps is flip_sin_to_cos — restored as True
        # from the upstream signature; confirm against diffusers.models.embeddings.Timesteps.
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: -10000 above the diagonal, 0 elsewhere.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors of the model, keyed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); a dict must cover every attention layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Denoise `hidden_states` (noisy image embedding) for the given `timestep`."""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # Convert boolean mask to an additive bias, pad for the extra tokens,
            # combine with the causal mask, then repeat per attention head.
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        # With a prd token, the prediction is read from that last token; otherwise
        # from the tokens following the additional embeddings.
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize prior latents back to the CLIP embedding statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
77
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""", """UniSpeechForCTC""", """UniSpeechForPreTraining""", """UniSpeechForSequenceClassification""", """UniSpeechModel""", """UniSpeechPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> bool: """simple docstring""" if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True __UpperCAmelCase : List[Any] = 4 __UpperCAmelCase : Union[str, Any] = (1 << p) - 1 for _ in range(p - 2 ): __UpperCAmelCase : str = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
77
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) 
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed __UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : 
Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for _ in range(UpperCamelCase ): for i in range(_ % 2 , arr_size - 1 , 2 ): if arr[i + 1] < arr[i]: __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = arr[i + 1], arr[i] return arr if __name__ == "__main__": A = list(range(10, 0, -1)) print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
77
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. A = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. A = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. A = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_000)) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> tuple[str, float]: """simple docstring""" __UpperCAmelCase : str = len([g for position, g in enumerate(UpperCamelCase ) if g == main_target[position]] ) return (item, float(UpperCamelCase )) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> tuple[str, str]: """simple docstring""" __UpperCAmelCase : Optional[Any] = random.randint(0 , len(UpperCamelCase ) - 1 ) __UpperCAmelCase : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] __UpperCAmelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" __UpperCAmelCase : Dict = list(UpperCamelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __UpperCAmelCase : Union[str, Any] = random.choice(UpperCamelCase ) return "".join(UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> list[str]: """simple docstring""" __UpperCAmelCase : List[str] = [] # Generate more children proportionally to the fitness score. 
__UpperCAmelCase : int = int(parent_a[1] * 100 ) + 1 __UpperCAmelCase : List[str] = 10 if child_n >= 10 else child_n for _ in range(UpperCamelCase ): __UpperCAmelCase : str = population_score[random.randint(0 , UpperCamelCase )][0] __UpperCAmelCase , __UpperCAmelCase : List[Any] = crossover(parent_a[0] , UpperCamelCase ) # Append new string to the population list. pop.append(mutate(UpperCamelCase , UpperCamelCase ) ) pop.append(mutate(UpperCamelCase , UpperCamelCase ) ) return pop def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = True ) -> tuple[int, int, str]: """simple docstring""" # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __UpperCAmelCase : Dict = f"{N_POPULATION} must be bigger than {N_SELECTED}" raise ValueError(UpperCamelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __UpperCAmelCase : List[str] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __UpperCAmelCase : List[Any] = f"{not_in_genes_list} is not in genes list, evolution cannot converge" raise ValueError(UpperCamelCase ) # Generate random starting population. __UpperCAmelCase : Optional[int] = [] for _ in range(UpperCamelCase ): population.append("".join([random.choice(UpperCamelCase ) for i in range(len(UpperCamelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __UpperCAmelCase , __UpperCAmelCase : List[str] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(UpperCamelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __UpperCAmelCase : Any = [evaluate(UpperCamelCase , UpperCamelCase ) for item in population] # Check if there is a matching evolution. __UpperCAmelCase : int = sorted(UpperCamelCase , key=lambda UpperCamelCase : x[1] , reverse=UpperCamelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"\nGeneration: {generation}" f"\nTotal Population:{total_population}" f"\nBest score: {population_score[0][1]}" f"\nBest string: {population_score[0][0]}" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __UpperCAmelCase : int = population[: int(N_POPULATION / 3 )] population.clear() population.extend(UpperCamelCase ) # Normalize population score to be between 0 and 1. __UpperCAmelCase : Union[str, Any] = [ (item, score / len(UpperCamelCase )) for item, score in population_score ] # This is selection for i in range(UpperCamelCase ): population.extend(select(population_score[int(UpperCamelCase )] , UpperCamelCase , UpperCamelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(UpperCamelCase ) > N_POPULATION: break if __name__ == "__main__": A = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) A = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) A , A , A = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
77
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
1
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance A = 6378137.0 A = 6356752.314245 A = 6_378_137 def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" __UpperCAmelCase : Dict = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __UpperCAmelCase : Dict = atan((1 - flattening) * tan(radians(UpperCamelCase ) ) ) __UpperCAmelCase : Optional[int] = atan((1 - flattening) * tan(radians(UpperCamelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __UpperCAmelCase : List[Any] = haversine_distance(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values __UpperCAmelCase : str = (b_lata + b_lata) / 2 __UpperCAmelCase : Tuple = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __UpperCAmelCase : Tuple = (sin(UpperCamelCase ) ** 2) * (cos(UpperCamelCase ) ** 2) __UpperCAmelCase : Optional[Any] = cos(sigma / 2 ) ** 2 __UpperCAmelCase : List[Any] = (sigma - sin(UpperCamelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __UpperCAmelCase : Union[str, Any] = (cos(UpperCamelCase ) ** 2) * (sin(UpperCamelCase ) ** 2) __UpperCAmelCase : List[str] = sin(sigma / 2 ) ** 2 __UpperCAmelCase : Union[str, Any] = (sigma + sin(UpperCamelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
77
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return 
self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
1
"""simple docstring""" from __future__ import annotations A = list[list[int]] # assigning initial values to the grid A = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _UpperCamelCase ( UpperCamelCase ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _UpperCamelCase ( UpperCamelCase ) -> Matrix | None: """simple docstring""" if location := find_empty_location(UpperCamelCase ): __UpperCAmelCase , __UpperCAmelCase : Dict = location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Dict = digit if sudoku(UpperCamelCase ) is not None: return grid __UpperCAmelCase : Optional[Any] = 0 return None def _UpperCamelCase ( UpperCamelCase ) -> None: """simple docstring""" for row in grid: for cell in row: print(UpperCamelCase , end=" " ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") A = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
77
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
1
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if 
the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : 
Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class a__ ( __magic_name__ ): lowercase_ = "Salesforce/blip-image-captioning-base" lowercase_ = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) lowercase_ = "image_captioner" lowercase_ = AutoModelForVisionaSeq lowercase_ = ["image"] lowercase_ = ["text"] def __init__( self : Any , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Union[str, Any]): """simple docstring""" requires_backends(self , ["vision"]) super().__init__(*UpperCamelCase_ , **UpperCamelCase_) def a_ ( self : List[str] , UpperCamelCase_ : "Image"): """simple docstring""" return self.pre_processor(images=UpperCamelCase_ , return_tensors="pt") def a_ ( self : Optional[Any] , UpperCamelCase_ : Tuple): """simple docstring""" return self.model.generate(**UpperCamelCase_) def a_ ( self : Any , UpperCamelCase_ : Tuple): """simple docstring""" return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)[0].strip()
77
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , 
num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] 
= mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
1
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) 
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed __UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : 
Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. 
In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] 
, UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if 
token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A = logging.get_logger(__name__) A = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class a__ ( __magic_name__ ): lowercase_ = "gptsan-japanese" lowercase_ = [ "past_key_values", ] lowercase_ = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , UpperCamelCase_ : List[Any]=36000 , UpperCamelCase_ : Optional[Any]=1280 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Dict=8192 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Union[str, Any]=128 , UpperCamelCase_ : Dict=10 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : Union[str, Any]=16 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : List[str]=128 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : str=1e-5 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Dict=0.0 , UpperCamelCase_ : Tuple="float32" , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : int=0.002 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=35998 , UpperCamelCase_ : str=35995 , UpperCamelCase_ : Optional[int]=35999 , **UpperCamelCase_ : Dict , ): """simple docstring""" __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : str = d_model __UpperCAmelCase : Optional[Any] = d_ff __UpperCAmelCase : Optional[Any] = d_ext __UpperCAmelCase : Any = d_spout __UpperCAmelCase : Dict = num_switch_layers __UpperCAmelCase : Any = num_ext_layers __UpperCAmelCase : List[str] = num_switch_layers + num_ext_layers __UpperCAmelCase : List[str] = num_heads __UpperCAmelCase : Optional[int] = num_experts __UpperCAmelCase : List[Any] = expert_capacity 
__UpperCAmelCase : List[str] = dropout_rate __UpperCAmelCase : Union[str, Any] = layer_norm_epsilon __UpperCAmelCase : List[Any] = router_bias __UpperCAmelCase : Tuple = router_jitter_noise __UpperCAmelCase : Any = router_dtype __UpperCAmelCase : List[str] = router_ignore_padding_tokens __UpperCAmelCase : int = output_hidden_states __UpperCAmelCase : Dict = output_attentions __UpperCAmelCase : List[Any] = initializer_factor __UpperCAmelCase : Dict = output_router_logits __UpperCAmelCase : Optional[int] = use_cache super().__init__( separator_token_id=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
77
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): __UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor 
__UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
1
"""simple docstring""" from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
77
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... 
return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = 
spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
1
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a__ ( __magic_name__ ): def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : str = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(UpperCamelCase_ , "width_multiplier")) class a__ : def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : str=13 , UpperCamelCase_ : Optional[Any]=64 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Dict="swish" , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]=10 , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=0.25 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , ): """simple docstring""" __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Union[str, Any] = image_size __UpperCAmelCase : Any = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Optional[Any] = make_divisible(512 * width_multiplier , 
divisor=8) __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Union[str, Any] = conv_kernel_size __UpperCAmelCase : List[Any] = output_stride __UpperCAmelCase : int = classifier_dropout_prob __UpperCAmelCase : Any = use_labels __UpperCAmelCase : List[Any] = is_training __UpperCAmelCase : Any = num_labels __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Union[str, Any] = scope __UpperCAmelCase : int = width_multiplier __UpperCAmelCase : Optional[int] = ffn_dropout __UpperCAmelCase : Optional[int] = attn_dropout def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : str = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels) __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) __UpperCAmelCase : Any = self.get_config() return config, pixel_values, labels, pixel_labels def a_ ( self : Union[str, Any]): """simple docstring""" return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : str = MobileViTVaModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, 
self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a_ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : int = MobileViTVaForImageClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple): """simple docstring""" __UpperCAmelCase : Any = self.num_labels __UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() __UpperCAmelCase : Tuple = model(UpperCamelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : str = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase_ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase_ = ( { "feature-extraction": MobileViTVaModel, "image-classification": 
MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = MobileViTVaModelTester(self) __UpperCAmelCase : List[Any] = MobileViTVaConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds") def a_ ( self : Optional[int]): """simple docstring""" pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings") def a_ ( self : List[Any]): """simple docstring""" pass @unittest.skip(reason="MobileViTV2 does not output attentions") def a_ ( self : List[Any]): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.") def a_ ( self : str): """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.") def a_ ( self : List[str]): """simple docstring""" pass def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self 
: Optional[int]): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]): __UpperCAmelCase : str = model_class(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() with torch.no_grad(): __UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : Tuple = outputs.hidden_states __UpperCAmelCase : Tuple = 5 self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __UpperCAmelCase : Dict = 2 for i in range(len(UpperCamelCase_)): self.assertListEqual( list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2) __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_) @slow def a_ ( self : Dict): """simple docstring""" for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = 
MobileViTVaModel.from_pretrained(UpperCamelCase_) self.assertIsNotNone(UpperCamelCase_) def _UpperCamelCase ( ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") if is_vision_available() else None ) @slow def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to( UpperCamelCase_) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : str = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : List[str] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01]).to(UpperCamelCase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4)) @slow def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : int = model.to(UpperCamelCase_) __UpperCAmelCase : Dict = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : Optional[Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : List[Any] = model(**UpperCamelCase_) __UpperCAmelCase : Tuple = outputs.logits # 
verify the logits __UpperCAmelCase : int = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape , UpperCamelCase_) __UpperCAmelCase : List[str] = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4)) @slow def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : Union[str, Any] = model.to(UpperCamelCase_) __UpperCAmelCase : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3") __UpperCAmelCase : List[str] = prepare_img() __UpperCAmelCase : Dict = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_) # forward pass with torch.no_grad(): __UpperCAmelCase : List[str] = model(**UpperCamelCase_) __UpperCAmelCase : int = outputs.logits.detach().cpu() __UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)]) __UpperCAmelCase : Tuple = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape , UpperCamelCase_) __UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_) __UpperCAmelCase : Tuple = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape , UpperCamelCase_)
77
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = 
self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] 
= self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> float: """simple docstring""" __UpperCAmelCase : List[Any] = 0 while len(UpperCamelCase ) > 1: __UpperCAmelCase : Optional[int] = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): __UpperCAmelCase : Dict = files.index(min(UpperCamelCase ) ) temp += files[min_index] files.pop(UpperCamelCase ) files.append(UpperCamelCase ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
77
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return 
DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: 
{num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or 
TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
1
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" __UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )] __UpperCAmelCase : Union[str, Any] = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1 or len(UpperCamelCase ) <= key: return input_string for position, character in enumerate(UpperCamelCase ): __UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds __UpperCAmelCase : List[str] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = ["".join(UpperCamelCase ) for row in temp_grid] __UpperCAmelCase : Any = "".join(UpperCamelCase ) return output_string def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str: """simple docstring""" __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Union[str, Any] = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1: return input_string __UpperCAmelCase : list[list[str]] = [[] for _ in range(UpperCamelCase )] # generates template for position in range(len(UpperCamelCase ) ): __UpperCAmelCase : Optional[int] = position % (lowest * 2) # puts it in bounds __UpperCAmelCase : str = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("*" ) __UpperCAmelCase : Union[str, Any] = 0 for row in temp_grid: # fills in the characters __UpperCAmelCase : Tuple = input_string[counter : counter + len(UpperCamelCase )] grid.append(list(UpperCamelCase ) ) counter += len(UpperCamelCase ) __UpperCAmelCase : List[str] = "" # reads as zigzag for position in range(len(UpperCamelCase ) ): __UpperCAmelCase : Dict = position % (lowest * 2) # puts it in bounds __UpperCAmelCase : Union[str, Any] = min(UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _UpperCamelCase ( UpperCamelCase 
) -> dict[int, str]: """simple docstring""" __UpperCAmelCase : Tuple = {} for key_guess in range(1 , len(UpperCamelCase ) ): # tries every key __UpperCAmelCase : str = decrypt(UpperCamelCase , UpperCamelCase ) return results if __name__ == "__main__": import doctest doctest.testmod()
77
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
1
"""simple docstring""" from __future__ import annotations import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) != 2 or len(a[0] ) != 2 or len(UpperCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) __UpperCAmelCase : Optional[Any] = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Dict: """simple docstring""" return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase ) ) ] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[Any]: """simple docstring""" return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase ) ) ] def _UpperCamelCase ( UpperCamelCase ) -> tuple[list, list, list, list]: """simple docstring""" if len(UpperCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" 
) __UpperCAmelCase : List[str] = len(UpperCamelCase ) __UpperCAmelCase : int = matrix_length // 2 __UpperCAmelCase : Tuple = [[a[i][j] for j in range(UpperCamelCase , UpperCamelCase )] for i in range(UpperCamelCase )] __UpperCAmelCase : Dict = [ [a[i][j] for j in range(UpperCamelCase , UpperCamelCase )] for i in range(UpperCamelCase , UpperCamelCase ) ] __UpperCAmelCase : List[Any] = [[a[i][j] for j in range(UpperCamelCase )] for i in range(UpperCamelCase )] __UpperCAmelCase : Any = [[a[i][j] for j in range(UpperCamelCase )] for i in range(UpperCamelCase , UpperCamelCase )] return top_left, top_right, bot_left, bot_right def _UpperCamelCase ( UpperCamelCase ) -> tuple[int, int]: """simple docstring""" return len(UpperCamelCase ), len(matrix[0] ) def _UpperCamelCase ( UpperCamelCase ) -> None: """simple docstring""" print("\n".join(str(UpperCamelCase ) for line in matrix ) ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" if matrix_dimensions(UpperCamelCase ) == (2, 2): return default_matrix_multiplication(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = split_matrix(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = split_matrix(UpperCamelCase ) __UpperCAmelCase : Dict = actual_strassen(UpperCamelCase , matrix_subtraction(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : str = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = actual_strassen(UpperCamelCase , matrix_subtraction(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : str = 
actual_strassen(matrix_subtraction(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : Dict = actual_strassen(matrix_subtraction(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) , UpperCamelCase ) __UpperCAmelCase : Dict = matrix_addition(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = matrix_addition(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) , UpperCamelCase ) # construct the new matrix from our 4 quadrants __UpperCAmelCase : str = [] for i in range(len(UpperCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(UpperCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" if matrix_dimensions(UpperCamelCase )[1] != matrix_dimensions(UpperCamelCase )[0]: __UpperCAmelCase : List[str] = ( "Unable to multiply these matrices, please check the dimensions.\n" f"Matrix A: {matrixa}\n" f"Matrix B: {matrixa}" ) raise Exception(UpperCamelCase ) __UpperCAmelCase : int = matrix_dimensions(UpperCamelCase ) __UpperCAmelCase : Any = matrix_dimensions(UpperCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __UpperCAmelCase : Optional[int] = max(*UpperCamelCase , *UpperCamelCase ) __UpperCAmelCase : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(UpperCamelCase ) ) ) ) __UpperCAmelCase : List[str] = matrixa __UpperCAmelCase : Union[str, Any] = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCamelCase ): if i < dimensiona[0]: for _ in 
range(dimensiona[1] , UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __UpperCAmelCase : str = actual_strassen(UpperCamelCase , UpperCamelCase ) # Removing the additional zeros for i in range(0 , UpperCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": A = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] A = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
77
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if 
the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in 
range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : 
List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
1